Diffstat (limited to 'module/plugins/hoster')
-rw-r--r--  module/plugins/hoster/BoltsharingCom.py       9
-rw-r--r--  module/plugins/hoster/FileApeCom.py          56
-rw-r--r--  module/plugins/hoster/FilebeerInfo.py         3
-rw-r--r--  module/plugins/hoster/HellspyCz.py           47
-rw-r--r--  module/plugins/hoster/IcyFilesCom.py         88
-rw-r--r--  module/plugins/hoster/Share76Com.py          10
-rw-r--r--  module/plugins/hoster/SharebeesCom.py        12
-rw-r--r--  module/plugins/hoster/ShragleCom.py          95
-rw-r--r--  module/plugins/hoster/SpeedLoadOrg.py         1
-rw-r--r--  module/plugins/hoster/SpeedfileCz.py         44
-rw-r--r--  module/plugins/hoster/TurbouploadCom.py      20
-rw-r--r--  module/plugins/hoster/UploadStationCom.py    19
-rw-r--r--  module/plugins/hoster/WuploadCom.py         230
-rw-r--r--  module/plugins/hoster/X7To.py                80
14 files changed, 50 insertions, 664 deletions
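
Every file in this changeset gets the same treatment: the hoster-specific download logic is deleted, the class is rebased on DeadHoster, the version is bumped, and getInfo is regenerated through create_getInfo. A minimal sketch of the stub each plugin is reduced to is shown below; ExampleDeadCom is a placeholder name, and the comments about DeadHoster's behaviour are an assumption inferred from how the converted plugins use it, not code taken from this commit.

# -*- coding: utf-8 -*-
# Sketch of the post-conversion stub (placeholder class name; the real
# plugins keep their own __name__, __pattern__ and authorship and only
# bump __version__).
from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo


class ExampleDeadCom(DeadHoster):
    __name__ = "ExampleDeadCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:\w*\.)*?example\.com/\w{12}"
    __version__ = "0.02"
    __description__ = """Example.com hoster plugin"""
    __author_name__ = ("zoidberg")
    __author_mail__ = ("zoidberg@mujmail.cz")


# create_getInfo() presumably returns a getInfo(urls) generator that
# reports every link as unavailable, and DeadHoster itself aborts any
# download attempt, which is why no patterns or handler methods remain.
getInfo = create_getInfo(ExampleDeadCom)
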
| diff --git a/module/plugins/hoster/BoltsharingCom.py b/module/plugins/hoster/BoltsharingCom.py index f9cc91ca5..cc8b1c7e6 100644 --- a/module/plugins/hoster/BoltsharingCom.py +++ b/module/plugins/hoster/BoltsharingCom.py @@ -1,17 +1,16 @@  # -*- coding: utf-8 -*- -from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class BoltsharingCom(XFileSharingPro): + +class BoltsharingCom(DeadHoster):      __name__ = "BoltsharingCom"      __type__ = "hoster"      __pattern__ = r"http://(?:\w*\.)*?boltsharing.com/\w{12}" -    __version__ = "0.01" +    __version__ = "0.02"      __description__ = """Boltsharing.com hoster plugin"""      __author_name__ = ("zoidberg")      __author_mail__ = ("zoidberg@mujmail.cz") -    HOSTER_NAME = "boltsharing.com" -  getInfo = create_getInfo(BoltsharingCom) diff --git a/module/plugins/hoster/FileApeCom.py b/module/plugins/hoster/FileApeCom.py index f07fbfc8a..2a5daf16a 100644 --- a/module/plugins/hoster/FileApeCom.py +++ b/module/plugins/hoster/FileApeCom.py @@ -1,64 +1,16 @@  #!/usr/bin/env python  # -*- coding: utf-8 -*- -import re +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -from module.plugins.Hoster import Hoster - -class FileApeCom(Hoster): +class FileApeCom(DeadHoster):      __name__ = "FileApeCom"      __type__ = "hoster"      __pattern__ = r"http://(www\.)?fileape\.com/(index\.php\?act=download\&id=|dl/)\w+" -    __version__ = "0.11" +    __version__ = "0.12"      __description__ = """FileApe Download Hoster"""      __author_name__ = ("espes") -    def setup(self): -        self.multiDL = False - -    def process(self, pyfile): -        self.pyfile = pyfile - -        self.html = self.load(self.pyfile.url) - -        if "This file is either temporarily unavailable or does not exist" in self.html: -            self.offline() - -        self.html = self.load(self.pyfile.url + "&g=1") - -        continueMatch = re.search(r"window\.location = '(http://.*?)'", self.html) -        if not continueMatch: -            continueMatch = re.search(r"'(http://fileape\.com/\?act=download&t=[A-Za-z0-9_-]+)'", self.html) -        if continueMatch: -            continuePage = continueMatch.group(1) -        else: -            self.fail("Plugin Defect") - -        wait = 60 -        waitMatch = re.search( -            "id=\"waitnumber\" style=\"font-size:2em; text-align:center; width:33px; height:33px;\">(\\d+)</span>", -            self.html) -        if waitMatch: -            wait = int(waitMatch.group(1)) -        self.setWait(wait + 3) -        self.wait() - -        self.html = self.load(continuePage) -        linkMatch = re.search(r"<div style=\"text-align:center; font-size: 30px;\"><a href=\"(http://.*?)\"", self.html) - -        if not linkMatch: -            linkMatch = re.search(r"\"(http://tx\d+\.fileape\.com/[a-z]+/.*?)\"", self.html) -        if linkMatch: -            link = linkMatch.group(1) -        else: -            self.fail("Plugin Defect") - -        pyfile.name = link.rpartition('/')[2] - -        self.download(link) -        check = self.checkDownload({"exp": "Download ticket expired"}) -        if check == "exp": -            self.logInfo("Ticket expired, retrying...") -            self.retry() +getInfo = create_getInfo(FileApeCom) diff --git a/module/plugins/hoster/FilebeerInfo.py b/module/plugins/hoster/FilebeerInfo.py index 216ecfbca..d1bef9e6e 100644 --- a/module/plugins/hoster/FilebeerInfo.py +++ 
b/module/plugins/hoster/FilebeerInfo.py @@ -1,4 +1,5 @@  # -*- coding: utf-8 -*- +  from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo @@ -12,4 +13,4 @@ class FilebeerInfo(DeadHoster):      __author_mail__ = ("zoidberg@mujmail.cz") -getInfo = create_getInfo(FilebeerInfo)
\ No newline at end of file +getInfo = create_getInfo(FilebeerInfo) diff --git a/module/plugins/hoster/HellspyCz.py b/module/plugins/hoster/HellspyCz.py index 2e0746ff4..1bb595599 100644 --- a/module/plugins/hoster/HellspyCz.py +++ b/module/plugins/hoster/HellspyCz.py @@ -16,58 +16,17 @@      @author: zoidberg  """ -import re -from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class HellspyCz(SimpleHoster): +class HellspyCz(DeadHoster):      __name__ = "HellspyCz"      __type__ = "hoster"      __pattern__ = r"http://(?:\w*\.)*(?:hellspy\.(?:cz|com|sk|hu|pl)|sciagaj.pl)(/\S+/\d+)/?.*" -    __version__ = "0.27" +    __version__ = "0.28"      __description__ = """HellSpy.cz"""      __author_name__ = ("zoidberg")      __author_mail__ = ("zoidberg@mujmail.cz") -    FILE_SIZE_PATTERN = r'<span class="filesize right">(?P<S>[0-9.]+)\s*<span>(?P<U>[kKMG])i?B' -    FILE_NAME_PATTERN = r'<h1 title="(?P<N>.*?)"' -    FILE_OFFLINE_PATTERN = r'<h2>(404 - Page|File) not found</h2>' -    FILE_URL_REPLACEMENTS = [(__pattern__, r"http://www.hellspy.com\1")] - -    CREDIT_LEFT_PATTERN = r'<strong>Credits: </strong>\s*(\d+)' -    DOWNLOAD_AGAIN_PATTERN = r'<a id="button-download-start"[^>]*title="You can download the file without deducting your credit.">' -    DOWNLOAD_URL_PATTERN = r"launchFullDownload\('([^']+)'" - -    def setup(self): -        self.resumeDownload = self.multiDL = True -        self.chunkLimit = 1 - -    def handleFree(self): -        self.fail("Only premium users can download from HellSpy.cz") - -    def handlePremium(self): -        # set PHPSESSID cookie -        cj = self.account.getAccountCookies(self.user) -        cj.setCookie(".hellspy.com", "PHPSESSID", self.account.phpsessid) -        self.logDebug("PHPSESSID: " + cj.getCookie("PHPSESSID")) - -        info = self.account.getAccountInfo(self.user, True) -        self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"] / 1024)) - -        if self.pyfile.size / 1024 > info["trafficleft"]: -            self.logWarning("Not enough credit left to download file") - -        # get premium download URL and download -        self.html = self.load(self.pyfile.url + "?download=1") -        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html) -        if not found: -            self.parseError("Download URL") -        url = found.group(1) -        self.logDebug("Download URL: " + url) -        self.download(url) - -        info = self.account.getAccountInfo(self.user, True) -        self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"] / 1024)) -  getInfo = create_getInfo(HellspyCz) diff --git a/module/plugins/hoster/IcyFilesCom.py b/module/plugins/hoster/IcyFilesCom.py index 53c934675..d0b101717 100644 --- a/module/plugins/hoster/IcyFilesCom.py +++ b/module/plugins/hoster/IcyFilesCom.py @@ -16,97 +16,17 @@      @author: godofdream  """ -import re -from module.plugins.Hoster import Hoster -from module.network.RequestFactory import getURL +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -def getInfo(urls): -    result = [] -    for url in urls: -        html = getURL(url, decode=True) -        if re.search(IcyFilesCom.FILE_OFFLINE_PATTERN, html): -            # File offline -            result.append((url, 0, 1, url)) -        else: -            # Get file info -            name = re.search(IcyFilesCom.FILE_NAME_PATTERN, html) -            size = 
re.search(IcyFilesCom.SIZE_PATTERN, html) -            if name is not None: -                name = name.group(1) -                size = (int(size.group(1)) * 1000000) -                result.append((name, size, 2, url)) -    yield result - - -class IcyFilesCom(Hoster): +class IcyFilesCom(DeadHoster):      __name__ = "IcyFilesCom"      __type__ = "hoster"      __pattern__ = r"http://(?:www\.)?icyfiles\.com/(.*)" -    __version__ = "0.05" +    __version__ = "0.06"      __description__ = """IcyFiles.com plugin - free only"""      __author_name__ = ("godofdream")      __author_mail__ = ("soilfiction@gmail.com") -    FILE_NAME_PATTERN = r'<div id="file">(.*?)</div>' -    SIZE_PATTERN = r'<li>(\d+) <span>Size/mb' -    FILE_OFFLINE_PATTERN = r'The requested File cant be found' -    WAIT_LONGER_PATTERN = r'All download tickets are in use\. please try it again in a few seconds' -    WAIT_PATTERN = r'<div class="counter">(\d+)</div>' -    TOOMUCH_PATTERN = r'Sorry dude, you have downloaded too much\. Please wait (\d+) seconds' - -    def setup(self): -        self.multiDL = False - -    def process(self, pyfile): -        self.html = self.load(pyfile.url, decode=True) -        # check if offline -        if re.search(self.FILE_OFFLINE_PATTERN, self.html): -            self.offline() -            # All Downloadtickets in use -        timmy = re.search(self.WAIT_LONGER_PATTERN, self.html) -        if timmy: -            self.logDebug("waitforfreeslot") -            self.waitForFreeSlot() -            # Wait the waittime -        timmy = re.search(self.WAIT_PATTERN, self.html) -        if timmy: -            self.logDebug("waiting", timmy.group(1)) -            self.setWait(int(timmy.group(1)) + 2, False) -            self.wait() -        # Downloaded to much -        timmy = re.search(self.TOOMUCH_PATTERN, self.html) -        if timmy: -            self.logDebug("too much", timmy.group(1)) -            self.setWait(int(timmy.group(1)), True) -            self.wait() -        # Find Name -        found = re.search(self.FILE_NAME_PATTERN, self.html) -        if found is None: -            self.fail("Parse error (NAME)") -        pyfile.name = found.group(1) -        # Get the URL -        url = pyfile.url -        found = re.search(self.__pattern__, url) -        if found is None: -            self.fail("Parse error (URL)") -        download_url = "http://icyfiles.com/download.php?key=" + found.group(1) -        self.download(download_url) -        # check download -        check = self.checkDownload({ -            "notfound": re.compile(r"^<head><title>404 Not Found</title>$"), -            "skippedcountdown": re.compile(r"^Dont skip the countdown$"), -            "waitforfreeslots": re.compile(self.WAIT_LONGER_PATTERN), -            "downloadedtoomuch": re.compile(self.TOOMUCH_PATTERN) -        }) -        if check == "skippedcountdown": -            self.fail("Countdown error") -        elif check == "notfound": -            self.fail("404 Not found") -        elif check == "waitforfreeslots": -            self.waitForFreeSlot() -        elif check == "downloadedtoomuch": -            self.retry() -    def waitForFreeSlot(self): -        self.retry(60, 60, "Wait for free slot") +getInfo = create_getInfo(IcyFilesCom) diff --git a/module/plugins/hoster/Share76Com.py b/module/plugins/hoster/Share76Com.py index b48780652..81a169527 100644 --- a/module/plugins/hoster/Share76Com.py +++ b/module/plugins/hoster/Share76Com.py @@ -1,17 +1,15 @@  # -*- coding: utf-8 -*- -from 
module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class Share76Com(XFileSharingPro): + +class Share76Com(DeadHoster):      __name__ = "Share76Com"      __type__ = "hoster"      __pattern__ = r"http://(?:\w*\.)*?share76.com/\w{12}" -    __version__ = "0.03" +    __version__ = "0.04"      __description__ = """share76.com hoster plugin"""      __author_name__ = ("me") -    FILE_INFO_PATTERN = r'<h2>\s*File:\s*<font[^>]*>(?P<N>[^>]+)</font>\s*\[<font[^>]*>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</font>\]</h2>' -    HOSTER_NAME = "share76.com" -  getInfo = create_getInfo(Share76Com) diff --git a/module/plugins/hoster/SharebeesCom.py b/module/plugins/hoster/SharebeesCom.py index cc1173fea..5eaaf24f5 100644 --- a/module/plugins/hoster/SharebeesCom.py +++ b/module/plugins/hoster/SharebeesCom.py @@ -1,20 +1,16 @@  # -*- coding: utf-8 -*- -from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class SharebeesCom(XFileSharingPro): + +class SharebeesCom(DeadHoster):      __name__ = "SharebeesCom"      __type__ = "hoster"      __pattern__ = r"http://(?:\w*\.)*?sharebees.com/\w{12}" -    __version__ = "0.01" +    __version__ = "0.02"      __description__ = """ShareBees hoster plugin"""      __author_name__ = ("zoidberg")      __author_mail__ = ("zoidberg@mujmail.cz") -    FILE_NAME_PATTERN = r'<p class="file-name" ><.*?>\s*(?P<N>.+)' -    FILE_SIZE_PATTERN = r'<small>\((?P<S>\d+) bytes\)</small>' -    FORM_PATTERN = 'F1' -    HOSTER_NAME = "sharebees.com" -  getInfo = create_getInfo(SharebeesCom) diff --git a/module/plugins/hoster/ShragleCom.py b/module/plugins/hoster/ShragleCom.py index 5d19afbc7..2b1a8b80a 100644 --- a/module/plugins/hoster/ShragleCom.py +++ b/module/plugins/hoster/ShragleCom.py @@ -1,104 +1,17 @@  #!/usr/bin/env python  # -*- coding: utf-8 -*- -import re -from pycurl import FOLLOWLOCATION +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -from module.plugins.Hoster import Hoster -from module.plugins.internal.SimpleHoster import parseHtmlForm -from module.plugins.internal.CaptchaService import ReCaptcha -from module.network.RequestFactory import getURL -API_KEY = "078e5ca290d728fd874121030efb4a0d" - - -def parseFileInfo(self, url): -    file_id = re.match(self.__pattern__, url).group('ID') - -    data = getURL("http://www.cloudnator.com/api.php?key=%s&action=getStatus&fileID=%s" % (API_KEY, file_id), -                  decode=True).split() - -    if len(data) == 4: -        name, size, md5, status = data -        size = int(size) - -        if hasattr(self, "check_data"): -            self.checkdata = {"size": size, "md5": md5} - -        return name, size, 2 if status == "0" else 1, url -    else: -        return url, 0, 1, url - - -def getInfo(urls): -    for url in urls: -        file_info = parseFileInfo(ShragleCom, url) -        yield file_info - - -class ShragleCom(Hoster): +class ShragleCom(DeadHoster):      __name__ = "ShragleCom"      __type__ = "hoster"      __pattern__ = r"http://(?:www.)?(cloudnator|shragle).com/files/(?P<ID>.*?)/" -    __version__ = "0.21" +    __version__ = "0.22"      __description__ = """Cloudnator.com (Shragle.com) Download PLugin"""      __author_name__ = ("RaNaN", "zoidberg")      __author_mail__ = ("RaNaN@pyload.org", "zoidberg@mujmail.cz") -    def setup(self): -        self.multiDL = False -        self.check_data = None - -    
def process(self, pyfile): -        #get file status and info -        self.pyfile.name, self.pyfile.size, status = parseFileInfo(self, pyfile.url)[:3] -        if status != 2: -            self.offline() - -        self.handleFree() - -    def handleFree(self): -        self.html = self.load(self.pyfile.url) - -        #get wait time -        found = re.search('\s*var\sdownloadWait\s=\s(\d+);', self.html) -        self.setWait(int(found.group(1)) if found else 30) - -        #parse download form -        action, inputs = parseHtmlForm('id="download', self.html) - -        #solve captcha -        found = re.search('recaptcha/api/(?:challenge|noscript)?k=(.+?)', self.html) -        captcha_key = found.group(1) if found else "6LdEFb0SAAAAAAwM70vnYo2AkiVkCx-xmfniatHz" - -        recaptcha = ReCaptcha(self) - -        inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key) -        self.wait() - -        #validate -        self.req.http.c.setopt(FOLLOWLOCATION, 0) -        self.html = self.load(action, post=inputs) - -        found = re.search(r"Location\s*:\s*(\S*)", self.req.http.header, re.I) -        if found: -            self.correctCaptcha() -            download_url = found.group(1) -        else: -            if "Sicherheitscode falsch" in self.html: -                self.invalidCaptcha() -                self.retry(max_tries=5, reason="Invalid captcha") -            else: -                self.fail("Invalid session") - -        #download -        self.req.http.c.setopt(FOLLOWLOCATION, 1) -        self.download(download_url) -        check = self.checkDownload({ -            "ip_blocked": re.compile(r'<div class="error".*IP.*loading') -        }) -        if check == "ip_blocked": -            self.setWait(1800, True) -            self.wait() -            self.retry() +getInfo = create_getInfo(ShragleCom) diff --git a/module/plugins/hoster/SpeedLoadOrg.py b/module/plugins/hoster/SpeedLoadOrg.py index 5687fae85..2360b7773 100644 --- a/module/plugins/hoster/SpeedLoadOrg.py +++ b/module/plugins/hoster/SpeedLoadOrg.py @@ -1,4 +1,5 @@  # -*- coding: utf-8 -*- +  from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo diff --git a/module/plugins/hoster/SpeedfileCz.py b/module/plugins/hoster/SpeedfileCz.py index 3475ea29e..b8eaa775c 100644 --- a/module/plugins/hoster/SpeedfileCz.py +++ b/module/plugins/hoster/SpeedfileCz.py @@ -16,52 +16,16 @@      @author: zoidberg  """ -import re -from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class SpeedfileCz(SimpleHoster): +class SpeedfileCz(DeadHoster):      __name__ = "SpeedFileCz"      __type__ = "hoster"      __pattern__ = r"http://speedfile.cz/.*" -    __version__ = "0.31" +    __version__ = "0.32"      __description__ = """speedfile.cz"""      __author_name__ = ("zoidberg") -    FILE_NAME_PATTERN = r'<meta property="og:title" content="(?P<N>[^"]+)" />' -    FILE_SIZE_PATTERN = r'<strong><big>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B' -    URL_PATTERN = r'<a id="request" class="caps" href="([^"]+)" rel="nofollow">' -    FILE_OFFLINE_PATTERN = r'<title>Speedfile \| 404' -    WAIT_PATTERN = r'"requestedAt":(\d+),"allowedAt":(\d+),"adUri"' -    def setup(self): -        self.multiDL = False - -    def process(self, pyfile): -        self.html = self.load(pyfile.url, decode=True) - -        if re.search(self.FILE_OFFLINE_PATTERN, self.html): -            self.offline() - -        found = 
re.search(self.FILE_NAME_PATTERN, self.html) -        if found is None: -            self.fail("Parse error (NAME)") -        pyfile.name = found.group(1) - -        found = re.search(self.URL_PATTERN, self.html) -        if found is None: -            self.fail("Parse error (URL)") -        download_url = "http://speedfile.cz/" + found.group(1) - -        self.html = self.load(download_url) -        self.logDebug(self.html) -        found = re.search(self.WAIT_PATTERN, self.html) -        if found is None: -            self.fail("Parse error (WAIT)") -        self.setWait(int(found.group(2)) - int(found.group(1))) -        self.wait() - -        self.download(download_url) - - -create_getInfo(SpeedfileCz) +getInfo = create_getInfo(SpeedfileCz) diff --git a/module/plugins/hoster/TurbouploadCom.py b/module/plugins/hoster/TurbouploadCom.py index 12dad7906..1c60c2c87 100644 --- a/module/plugins/hoster/TurbouploadCom.py +++ b/module/plugins/hoster/TurbouploadCom.py @@ -16,31 +16,17 @@      @author: zoidberg  """ -import re -from module.plugins.internal.DeadHoster import DeadHoster as EasybytezCom, create_getInfo +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class TurbouploadCom(EasybytezCom): +class TurbouploadCom(DeadHoster):      __name__ = "TurbouploadCom"      __type__ = "hoster"      __pattern__ = r"http://(?:\w*\.)?turboupload.com/(\w+).*" -    __version__ = "0.02" +    __version__ = "0.03"      __description__ = """turboupload.com"""      __author_name__ = ("zoidberg")      __author_mail__ = ("zoidberg@mujmail.cz") -    # shares code with EasybytezCom - -    DIRECT_LINK_PATTERN = r'<a href="(http://turboupload.com/files/[^"]+)">\1</a>' - -    def handleFree(self): -        self.html = self.load(self.pyfile.url, post=self.getPostParameters(), ref=True, cookies=True) -        found = re.search(self.DIRECT_LINK_PATTERN, self.html) -        if not found: -            self.parseError('Download Link') -        url = found.group(1) -        self.logDebug('URL: ' + url) -        self.download(url) -  getInfo = create_getInfo(TurbouploadCom) diff --git a/module/plugins/hoster/UploadStationCom.py b/module/plugins/hoster/UploadStationCom.py index 2831facac..3583af65d 100644 --- a/module/plugins/hoster/UploadStationCom.py +++ b/module/plugins/hoster/UploadStationCom.py @@ -1,25 +1,16 @@  # -*- coding: utf-8 -*- -from module.plugins.hoster.FileserveCom import FileserveCom, checkFile -from module.plugins.Plugin import chunks +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -class UploadStationCom(FileserveCom): + +class UploadStationCom(DeadHoster):      __name__ = "UploadStationCom"      __type__ = "hoster"      __pattern__ = r"http://(?:www\.)?uploadstation\.com/file/(?P<id>[A-Za-z0-9]+)" -    __version__ = "0.51" +    __version__ = "0.52"      __description__ = """UploadStation.Com File Download Hoster"""      __author_name__ = ("fragonib", "zoidberg")      __author_mail__ = ("fragonib[AT]yahoo[DOT]es", "zoidberg@mujmail.cz") -    URLS = ['http://www.uploadstation.com/file/', 'http://www.uploadstation.com/check-links.php', -            'http://www.uploadstation.com/checkReCaptcha.php'] -    LINKCHECK_TR = r'<div class="details (?:white|grey)">(.*?)\t{9}</div>' -    LINKCHECK_TD = r'<div class="(?:col )?col\d">(?:<[^>]*>| )*([^<]*)' - -    LONG_WAIT_PATTERN = r'<h1>You have to wait (\d+) (\w+) to download the next file\.</h1>' - -def getInfo(urls): -    for chunk in chunks(urls, 100): -        yield checkFile(UploadStationCom, chunk) +getInfo 
= create_getInfo(UploadStationCom) diff --git a/module/plugins/hoster/WuploadCom.py b/module/plugins/hoster/WuploadCom.py index aaeeb59fd..68e83e228 100644 --- a/module/plugins/hoster/WuploadCom.py +++ b/module/plugins/hoster/WuploadCom.py @@ -1,239 +1,17 @@  #!/usr/bin/env python  # -*- coding: utf-8 -*- -import re -import string -from urllib import unquote +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -from module.plugins.Hoster import Hoster -from module.plugins.internal.CaptchaService import ReCaptcha -from module.plugins.Plugin import chunks -from module.network.RequestFactory import getURL -from module.common.json_layer import json_loads - - -def getInfo(urls): -    for chunk in chunks(urls, 20): -        result = [] -        ids = dict() -        for url in chunk: -            id = getId(url) -            if id: -                ids[id] = url -            else: -                result.append((None, 0, 1, url)) - -        if len(ids) > 0: -            check_url = "http://api.wupload.com/link?method=getInfo&format=json&ids=" + ",".join(ids.keys()) -            response = json_loads(getURL(check_url).decode("utf8", "ignore")) -            for item in response["FSApi_Link"]["getInfo"]["response"]["links"]: -                if item["status"] != "AVAILABLE": -                    result.append((None, 0, 1, ids[str(item["id"])])) -                else: -                    result.append((unquote(item["filename"]), item["size"], 2, ids[str(item["id"])])) -        yield result - - -def getId(url): -    match = re.search(WuploadCom.FILE_ID_PATTERN, url) -    if match: -        return string.replace(match.group("id"), "/", "-") -    else: -        return None - - -class WuploadCom(Hoster): +class WuploadCom(DeadHoster):      __name__ = "WuploadCom"      __type__ = "hoster"      __pattern__ = r"http://[\w\.]*?wupload\..*?/file/(([a-z][0-9]+/)?[0-9]+)(/.*)?" -    __version__ = "0.22" +    __version__ = "0.23"      __description__ = """Wupload com"""      __author_name__ = ("jeix", "paulking")      __author_mail__ = ("jeix@hasnomail.de", "") -    API_ADDRESS = "http://api.wupload.com" -    URL_DOMAIN_PATTERN = r'(?P<prefix>.*?)(?P<domain>.wupload\..+?)(?P<suffix>/.*)' -    FILE_ID_PATTERN = r'/file/(?P<id>([a-z][0-9]+/)?[0-9]+)(/.*)?' 
-    FILE_LINK_PATTERN = r'<p><a href="(http://.+?\.wupload\..+?)"><span>Download Now' -    WAIT_TIME_PATTERN = r'countDownDelay = (?P<wait>\d+)' -    WAIT_TM_PATTERN = r"name='tm' value='(.*?)' />" -    WAIT_TM_HASH_PATTERN = r"name='tm_hash' value='(.*?)' />" -    CAPTCHA_TYPE1_PATTERN = r'Recaptcha.create\("(.*?)",' -    CAPTCHA_TYPE2_PATTERN = r'id="recaptcha_image"><img style="display: block;" src="(.+)image?c=(.+?)"' - -    def setup(self): -        if not self.premium: -            self.chunkLimit = 1 -            self.multiDL = False - -    def process(self, pyfile): -        self.pyfile = pyfile - -        self.pyfile.url = self.checkFile(self.pyfile.url) - -        if self.premium: -            self.downloadPremium() -        else: -            self.downloadFree() - -    def checkFile(self, url): -        id = getId(url) -        self.logDebug("file id is %s" % id) -        if id: -            # Use the api to check the current status of the file and fixup data -            check_url = self.API_ADDRESS + "/link?method=getInfo&format=json&ids=%s" % id -            result = json_loads(self.load(check_url, decode=True)) -            item = result["FSApi_Link"]["getInfo"]["response"]["links"][0] -            self.logDebug("api check returns %s" % item) - -            if item["status"] != "AVAILABLE": -                self.offline() -            if item["is_password_protected"] != 0: -                self.fail("This file is password protected") - -            # ignored this check due to false api information -            #if item["is_premium_only"] != 0 and not self.premium: -            #    self.fail("need premium account for file") - -            self.pyfile.name = unquote(item["filename"]) - -            # Fix the url and resolve the domain to the correct regional variation -            url = item["url"] -            urlparts = re.search(self.URL_DOMAIN_PATTERN, url) -            if urlparts: -                url = urlparts.group("prefix") + self.getDomain() + urlparts.group("suffix") -                self.logDebug("localised url is %s" % url) -            return url -        else: -            self.fail("Invalid URL") - -    def getDomain(self): -        result = json_loads( -            self.load(self.API_ADDRESS + "/utility?method=getWuploadDomainForCurrentIp&format=json", decode=True)) -        self.logDebug("%s: response to get domain %s" % (self.__name__, result)) -        return result["FSApi_Utility"]["getWuploadDomainForCurrentIp"]["response"] - -    def downloadPremium(self): -        self.logDebug("Premium download") - -        api = self.API_ADDRESS + "/link?method=getDownloadLink&u=%%s&p=%%s&ids=%s" % getId(self.pyfile.url) - -        result = json_loads(self.load(api % (self.user, self.account.getAccountData(self.user)["password"]))) -        links = result["FSApi_Link"]["getDownloadLink"]["response"]["links"] - -        #wupload seems to return list and no dicts -        if type(links) == dict: -            info = links.values()[0] -        else: -            info = links[0] - -        if "status" in info and info["status"] == "NOT_AVAILABLE": -            self.tempOffline() - -        self.download(info["url"]) - -    def downloadFree(self): -        self.logDebug("Free download") -        # Get initial page -        self.html = self.load(self.pyfile.url) -        url = self.pyfile.url + "?start=1" -        self.html = self.load(url) -        self.handleErrors() - -        finalUrl = re.search(self.FILE_LINK_PATTERN, self.html) - -        if not finalUrl: -         
   self.doWait(url) - -            chall = re.search(self.CAPTCHA_TYPE1_PATTERN, self.html) -            chall2 = re.search(self.CAPTCHA_TYPE2_PATTERN, self.html) -            if chall or chall2: -                for i in range(5): -                    re_captcha = ReCaptcha(self) -                    if chall: -                        self.logDebug("Captcha type1") -                        challenge, result = re_captcha.challenge(chall.group(1)) -                    else: -                        self.logDebug("Captcha type2") -                        server = chall2.group(1) -                        challenge = chall2.group(2) -                        result = re_captcha.result(server, challenge) - -                    postData = {"recaptcha_challenge_field": challenge, -                                "recaptcha_response_field": result} - -                    self.html = self.load(url, post=postData) -                    self.handleErrors() -                    chall = re.search(self.CAPTCHA_TYPE1_PATTERN, self.html) -                    chall2 = re.search(self.CAPTCHA_TYPE2_PATTERN, self.html) - -                    if chall or chall2: -                        self.invalidCaptcha() -                    else: -                        self.correctCaptcha() -                        break - -            finalUrl = re.search(self.FILE_LINK_PATTERN, self.html) - -        if not finalUrl: -            self.fail("Couldn't find free download link") - -        self.logDebug("got download url %s" % finalUrl.group(1)) -        self.download(finalUrl.group(1)) - -    def doWait(self, url): -        # If the current page requires us to wait then wait and move to the next page as required - -        # There maybe more than one wait period. The extended wait if download limits have been exceeded (in which case we try reconnect) -        # and the short wait before every download. Visually these are the same, the difference is that one includes a code to allow -        # progress to the next page - -        waitSearch = re.search(self.WAIT_TIME_PATTERN, self.html) -        while waitSearch: -            wait = int(waitSearch.group("wait")) -            if wait > 300: -                self.wantReconnect = True - -            self.setWait(wait) -            self.logDebug("Waiting %d seconds." % wait) -            self.wait() - -            tm = re.search(self.WAIT_TM_PATTERN, self.html) -            tm_hash = re.search(self.WAIT_TM_HASH_PATTERN, self.html) - -            if tm and tm_hash: -                tm = tm.group(1) -                tm_hash = tm_hash.group(1) -                self.html = self.load(url, post={"tm": tm, "tm_hash": tm_hash}) -                self.handleErrors() -                break -            else: -                self.html = self.load(url) -                self.handleErrors() -                waitSearch = re.search(self.WAIT_TIME_PATTERN, self.html) - -    def handleErrors(self): -        if "This file is available for premium users only." 
in self.html: -            self.fail("need premium account for file") - -        if "The file that you're trying to download is larger than" in self.html: -            self.fail("need premium account for file") - -        if "Free users may only download 1 file at a time" in self.html: -            self.fail("only 1 file at a time for free users") - -        if "Free user can not download files" in self.html: -            self.fail("need premium account for file") - -        if "Download session in progress" in self.html: -            self.fail("already downloading") - -        if "This file is password protected" in self.html: -            self.fail("This file is password protected") - -        if "An Error Occurred" in self.html: -            self.fail("A server error occured.") -        if "This file was deleted" in self.html: -            self.offline() +getInfo = create_getInfo(WuploadCom) diff --git a/module/plugins/hoster/X7To.py b/module/plugins/hoster/X7To.py index 59ec6ed06..950cbd164 100644 --- a/module/plugins/hoster/X7To.py +++ b/module/plugins/hoster/X7To.py @@ -1,88 +1,16 @@  # -*- coding: utf-8 -*- -import re -from module.plugins.Hoster import Hoster +from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo -def getInfo(urls): -    yield [(url, 0, 1, url) for url in urls] - - -class X7To(Hoster): +class X7To(DeadHoster):      __name__ = "X7To"      __type__ = "hoster"      __pattern__ = r"http://(?:www.)?x7.to/" -    __version__ = "0.4" +    __version__ = "0.41"      __description__ = """X7.To File Download Hoster"""      __author_name__ = ("ernieb")      __author_mail__ = ("ernieb") -    FILE_INFO_PATTERN = r'<meta name="description" content="Download: (.*?) \(([0-9,.]+) (KB|MB|GB)\)' - -    def setup(self): -        self.multiDL = self.resumeDownload = False -        self.chunkLimit = 1 - -        self.file_id = re.search(r"http://x7.to/([a-zA-Z0-9]+)", self.pyfile.url).group(1) -        self.logDebug("file id is %s" % self.file_id) -        self.pyfile.url = "http://x7.to/" + self.file_id - -    def process(self, pyfile): -        self.fail("Hoster not longer available") - -    def handlePremium(self): -        # check if over limit first -        overLimit = re.search(r'<a onClick="cUser.buyTraffic\(\)" id="DL">', self.html) -        if overLimit: -            self.logDebug("over limit, falling back to free") -            self.handleFree() -        else: -            realurl = re.search(r'<a href="(http://stor.*?)" id="DL">', self.html) -            if realurl: -                realurl = realurl.group(1) -                self.logDebug("premium url found %s" % realurl) -            else: -                self.logDebug("premium link not found") -            self.download(realurl) - -    def handleFree(self): -        # find file id -        file_id = re.search(r"var dlID = '(.*?)'", self.html) -        if not file_id: -            self.fail("Free download id not found") - -        file_url = "http://x7.to/james/ticket/dl/" + file_id.group(1) -        self.logDebug("download id %s" % file_id.group(1)) - -        self.html = self.load(file_url, ref=False, decode=True) - -        # deal with errors -        if "limit-dl" in self.html: -            self.logDebug("Limit reached ... 
waiting") -            self.setWait(900, True) -            self.wait() -            self.retry() - -        if "limit-parallel" in self.html: -            self.fail("Cannot download in parallel") - -        # no waiting required, go to download -        waitCheck = re.search(r"wait:(\d*),", self.html) -        if waitCheck: -            waitCheck = int(waitCheck.group(1)) -            self.setWait(waitCheck) -            self.wait() - -        urlCheck = re.search(r"url:'(.*?)'", self.html) -        url = None -        if urlCheck: -            url = urlCheck.group(1) -            self.logDebug("free url found %s" % url) -        if url: -            try: -                self.download(url) -            except: -                self.logDebug("downloading url failed: %s" % url) -        else: -            self.fail("Free download url found") +getInfo = create_getInfo(X7To) | 
