diff options
| author | 2012-01-23 19:51:10 +0100 | |
|---|---|---|
| committer | 2012-01-23 19:51:10 +0100 | |
| commit | 1c93fefbea4140b45688b0cdd30e9527b5688e53 (patch) | |
| tree | e4e10b36a463870d827131e1c4b0e3c6c781d32a /module | |
| parent | undelete DlFreeFr (diff) | |
| download | pyload-1c93fefbea4140b45688b0cdd30e9527b5688e53.tar.xz | |
disabled some hosters
Diffstat (limited to 'module')
| -rw-r--r-- | module/plugins/hoster/FileserveCom.py | 23 | ||||
| -rw-r--r-- | module/plugins/hoster/FilesonicCom.py | 31 | ||||
| -rw-r--r-- | module/plugins/hoster/MegauploadCom.py | 105 | ||||
| -rw-r--r-- | module/plugins/hoster/X7To.py | 52 | 
4 files changed, 13 insertions, 198 deletions
| diff --git a/module/plugins/hoster/FileserveCom.py b/module/plugins/hoster/FileserveCom.py index 7f34621aa..ce3836a48 100644 --- a/module/plugins/hoster/FileserveCom.py +++ b/module/plugins/hoster/FileserveCom.py @@ -12,26 +12,13 @@ from module.utils import parseFileSize  def getInfo(urls):
 -    reg = r"<td>(http://(?:www\.)?fileserve\.com/file/.+(?:[\r\n\t]+)?)</td>[\r\n\t ]+<td>(.*?)</td>[\r\n\t ]+<td>(.*?)</td>[\r\n\t ]+<td>(Available|Not available)(?:\ )?(?:<img|</td>)"
 -    url = "http://fileserve.com/link-checker.php"
 -
 -    #get all at once, shows strange behavior otherwise
 -    html = getURL(url, post={"submit": "Check Urls", "urls": "\n".join(urls)}, decode=True)
 -
 -    match = re.findall(reg, html, re.IGNORECASE + re.MULTILINE)
 -
 -    result = []
 -    for url, name, size, status in match:
 -        result.append((name, parseFileSize(size), 1 if status == "Not available" else 2, url))
 -
 -    yield result
 -
 +    yield [(url, 0, 1, url) for url in urls]
  class FileserveCom(Hoster):
      __name__ = "FileserveCom"
      __type__ = "hoster"
      __pattern__ = r"http://(www\.)?fileserve\.com/file/[a-zA-Z0-9]+"
 -    __version__ = "0.43"
 +    __version__ = "0.44"
      __description__ = """Fileserve.Com File Download Hoster"""
      __author_name__ = ("jeix", "mkaay", "paul king")
      __author_mail__ = ("jeix@hasnomail.de", "mkaay@mkaay.de", "")
 @@ -48,11 +35,7 @@ class FileserveCom(Hoster):              self.chunkLimit = 1
      def process(self, pyfile):
 -        self.checkFile()
 -        if self.account and self.premium:
 -            self.handlePremium()
 -        else:
 -            self.handleFree()
 +        self.fail("Hoster not longer available")
      def checkFile(self):
          self.file_id = re.search(self.FILE_ID_KEY, self.pyfile.url).group("id")
 diff --git a/module/plugins/hoster/FilesonicCom.py b/module/plugins/hoster/FilesonicCom.py index 2788e7c62..525a99e7a 100644 --- a/module/plugins/hoster/FilesonicCom.py +++ b/module/plugins/hoster/FilesonicCom.py @@ -14,25 +14,7 @@ from module.common.json_layer import json_loads  def getInfo(urls):
 -    for chunk in chunks(urls, 20):
 -        result = []
 -        ids = dict()
 -        for url in chunk:
 -            id = getId(url)
 -            if id:
 -                ids[id] = url
 -            else:
 -                result.append((None, 0, 1, url))
 -
 -        if len(ids) > 0:
 -            check_url = "http://api.filesonic.com/link?method=getInfo&format=json&ids=" + ",".join(ids.keys())
 -            response = json_loads(getURL(check_url, decode=True))
 -            for item in response["FSApi_Link"]["getInfo"]["response"]["links"]:
 -                if item["status"] != "AVAILABLE":
 -                    result.append((ids[str(item["id"])], 0, 1, ids[str(item["id"])]))
 -                else:
 -                    result.append((unquote(item["filename"]), item["size"], 2, ids[str(item["id"])]))
 -        yield result
 +    yield [(url, 0, 1, url) for url in urls]
  def getId(url):
 @@ -47,7 +29,7 @@ class FilesonicCom(Hoster):      __name__ = "FilesonicCom"
      __type__ = "hoster"
      __pattern__ = r"http://[\w\.]*?(sharingmatrix|filesonic)\..*?/.*?file/([a-zA-Z0-9]+(/.+)?|[a-z0-9]+/[0-9]+(/.+)?|[0-9]+(/.+)?)"
 -    __version__ = "0.35"
 +    __version__ = "0.36"
      __description__ = """FilesonicCom und Sharingmatrix Download Hoster"""
      __author_name__ = ("jeix", "paulking")
      __author_mail__ = ("jeix@hasnomail.de", "")
 @@ -70,14 +52,7 @@ class FilesonicCom(Hoster):              self.multiDL = False
      def process(self, pyfile):
 -        self.pyfile = pyfile
 -
 -        self.pyfile.url = self.checkFile(self.pyfile.url)
 -
 -        if self.premium:
 -            self.downloadPremium()
 -        else:
 -            self.downloadFree()
 +        self.fail("Hoster not longer available")
      def checkFile(self, url):
          id = getId(url)
 diff --git a/module/plugins/hoster/MegauploadCom.py b/module/plugins/hoster/MegauploadCom.py index 81d528668..336cbfb58 100644 --- a/module/plugins/hoster/MegauploadCom.py +++ b/module/plugins/hoster/MegauploadCom.py @@ -15,37 +15,8 @@ from module.PyFile import statusMap  from pycurl import error
  def getInfo(urls):
 +    yield [(url, 0, 1, url) for url in urls]
 -    result = []
 -    
 -    # MU API request 
 -    post = {}
 -    fileIds=[]
 -    for match in re.finditer(MegauploadCom.__pattern__, " ".join(urls)):
 -        fileIds.append(match.group("id"))
 -    for i, fileId in enumerate(fileIds):
 -        post["id%i" % i] = fileId
 -    response = getURL(MegauploadCom.API_URL, post=post, decode = True)
 -    
 -    # Process API response
 -    parts = [re.split(r"&(?!amp;|#\d+;)", x) for x in re.split(r"&?(?=id[\d]+=)", response)]
 -    apiHosterMap = dict([elem.split('=') for elem in parts[0]])
 -    for entry in parts[1:]:
 -        apiFileDataMap = dict([elem.split('=') for elem in entry])
 -        apiFileId = [key for key in apiFileDataMap.keys() if key.startswith('id')][0]
 -        i = int(apiFileId.replace('id', ''))
 -            
 -        # File info
 -        fileInfo = _translateAPIFileInfo(apiFileId, apiFileDataMap, apiHosterMap)
 -        url = urls[i]
 -        name = html_unescape(fileInfo.get('name', url))
 -        size = fileInfo.get('size', 0)
 -        status = fileInfo.get('status', statusMap['queued'])
 -        
 -        # Add result
 -        result.append( (name, size, status, url ) )
 -    
 -    yield result
  def _translateAPIFileInfo(apiFileId, apiFileDataMap, apiHosterMap):
 @@ -65,7 +36,7 @@ class MegauploadCom(Hoster):      __name__ = "MegauploadCom"
      __type__ = "hoster"
      __pattern__ = r"http://[\w\.]*?(megaupload)\.com/.*?(\?|&)d=(?P<id>[0-9A-Za-z]+)"
 -    __version__ = "0.31"
 +    __version__ = "0.32"
      __description__ = """Megaupload.com Download Hoster"""
      __author_name__ = ("spoob")
      __author_mail__ = ("spoob@pyload.org")
 @@ -92,77 +63,7 @@ class MegauploadCom(Hoster):      def process(self, pyfile):
 -        if not self.account or not self.premium:
 -            self.download_html()
 -            self.download_api()
 -
 -            if not self.file_exists():
 -                self.offline()
 -
 -            url = self.get_file_url()
 -            if not url: self.fail("URL could not be retrieved")
 -
 -            time = self.get_wait_time()
 -            self.setWait(time)
 -            self.wait()
 -            
 -            try:
 -                self.download(url)
 -            except BadHeader, e:
 -                if not e.code == 503: raise
 -                self.checkWait()
 -
 -            check = self.checkDownload({"limit": "Download limit exceeded"})
 -            if check == "limit":
 -                self.checkWait()
 -    
 -        else:
 -            self.download_api()
 -            pyfile.name = self.get_file_name()
 -
 -            try:
 -                self.download(pyfile.url)
 -            except error, e:
 -                if e.args and e.args[0] == 33:
 -                    # undirect download and resume , not a good idea
 -                    page = self.load(pyfile.url)
 -                    self.download(re.search(self.PREMIUM_URL_PATTERN, page).group(1))
 -                    return 
 -                else:
 -                    raise
 -
 -            check = self.checkDownload({"dllink": re.compile(self.PREMIUM_URL_PATTERN)})
 -            if check == "dllink":
 -                self.log.warning(_("You should enable direct Download in your Megaupload Account settings"))
 -
 -                pyfile.size = 0
 -                self.download(self.lastCheck.group(1))
 -
 -    def checkWait(self):
 -
 -        wait = 0
 -
 -        for i in range(10):
 -            page = self.load("http://www.megaupload.com/?c=premium&l=1", decode=True)
 -            # MU thinks dl is already running
 -            if "Please finish this download before starting another one." in page and i != 9:
 -                sleep(2)
 -            elif i != 9:
 -                try:
 -                    wait = re.search(r"Please wait (\d+) minutes", page).group(1)
 -                    break
 -                except :
 -                    pass
 -            else:
 -                wait = 2 # lowest limit seems to be 2 minutes
 -
 -        self.log.info(_("Megaupload: waiting %d minutes") % int(wait))
 -        self.setWait(int(wait)*60, True)
 -        self.wait()
 -        if not self.premium:
 -            self.req.clearCookies()
 -
 -        self.retry(max_tries=10)
 +        self.fail("Hoster not longer available")
      def download_html(self):        
          for i in range(3):
 diff --git a/module/plugins/hoster/X7To.py b/module/plugins/hoster/X7To.py index dba7338e4..2ba534cff 100644 --- a/module/plugins/hoster/X7To.py +++ b/module/plugins/hoster/X7To.py @@ -6,36 +6,14 @@ from module.plugins.Hoster import Hoster  from module.network.RequestFactory import getURL
  def getInfo(urls):
 -    result = []
 -
 -    for url in urls:
 -        html = getURL(url)
 -
 -        if "var page = '404';" in html:
 -            result.append((url, 0, 1, url))
 -            continue
 -
 -        fileInfo = re.search(X7To.FILE_INFO_PATTERN, html)
 -        if fileInfo:
 -            name = fileInfo.group(1)
 -            units = float(fileInfo.group(2).replace(",", "."))
 -            pow = {'KB': 1, 'MB': 2, 'GB': 3}[fileInfo.group(3)]
 -            size = int(units * 1024 ** pow)
 -        else:
 -            # fallback: name could not be extracted.. most likely change on x7.to side ... needs to be checked then
 -            name = url
 -            size = 0
 -
 -        result.append((name, size, 2, url))
 -
 -    yield result
 +    yield [(url, 0, 1, url) for url in urls]
  class X7To(Hoster):
      __name__ = "X7To"
      __type__ = "hoster"
      __pattern__ = r"http://(?:www.)?x7.to/"
 -    __version__ = "0.1"
 +    __version__ = "0.2"
      __description__ = """X7.To File Download Hoster"""
      __author_name__ = ("ernieb")
      __author_mail__ = ("ernieb")
 @@ -55,29 +33,7 @@ class X7To(Hoster):          self.pyfile.url = "http://x7.to/" + self.file_id
      def process(self, pyfile):
 -        self.html = self.load(self.pyfile.url, ref=False, decode=True)
 -
 -        if "var page = '404';" in self.html:
 -            self.offline()
 -
 -        fileInfo = re.search(self.FILE_INFO_PATTERN, self.html, re.IGNORECASE)
 -        size = 0
 -        trafficLeft = 100000000000
 -        if fileInfo:
 -            self.pyfile.name = fileInfo.group(1)
 -            if self.account:
 -                trafficLeft = self.account.getAccountInfo(self.user)["trafficleft"]
 -                units = float(fileInfo.group(2).replace(".", "").replace(",", "."))
 -                pow = {'KB': 1, 'MB': 2, 'GB': 3}[fileInfo.group(3)]
 -                size = int(units * 1024 ** pow)
 -                self.logDebug("filesize: %s    trafficleft: %s" % (size, trafficLeft))
 -        else:
 -            self.logDebug("name and size not found")
 -
 -        if self.account and self.premium and trafficLeft > size:
 -            self.handlePremium()
 -        else:
 -            self.handleFree()
 +        self.fail("Hoster not longer available")
      def handlePremium(self):
          # check if over limit first
 @@ -134,4 +90,4 @@ class X7To(Hoster):  	    except:
  		self.logDebug("downloading url failed: %s" % url)
  	else:
 -	    self.fail("Free download url found")
\ No newline at end of file +	    self.fail("Free download url found")
 | 
