How to download pdf files from URLs leading to sub-URLs using Python

I am trying to download all the pdf files linked from the following URLs:

https://www.adb.org/projects/documents/country/ban/year/2020?terms=education
https://www.adb.org/projects/documents/country/ban/year/2019?terms=education
https://www.adb.org/projects/documents/country/ban/year/2018?terms=education

These URLs contain lists of links that lead to sub-pages containing the pdf files. The lists on the main pages come from search results for a given country, year, and search term.

I have tried changing the code below in different ways, but it doesn't seem to work. Any help would be appreciated. Thanks.

import os
import time
from glob import glob 
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup
 
url = ["https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
      "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
      "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"]

folder = glob("J:/pdfs/*/")

for i, folder_location in zip(url, folder):
    time.sleep(1)
    response = requests.get(i)
    soup = BeautifulSoup(response.text, "lxml")
    for link in soup.select("[href$='.pdf']"):
        filename = os.path.join(folder_location, link['href'].split('/')[-1])
        with open(filename, 'wb') as f:
            f.write(requests.get(urljoin(i, link['href'])).content)
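
The snippet above only ever looks one level deep: the search-result pages themselves most likely contain no hrefs ending in .pdf, only links to the project pages, so soup.select("[href$='.pdf']") finds nothing there (and glob("J:/pdfs/*/") only returns folders that already exist on disk). Below is a minimal two-level sketch using the same requests/BeautifulSoup stack; the a[href*='/documents/'] selector and the flat pdfs output folder are assumptions about the site's markup, not tested against it:

import os
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

urls = [
    "https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
    "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
    "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education",
]

os.makedirs("pdfs", exist_ok=True)
session = requests.Session()

for listing_url in urls:
    listing = BeautifulSoup(session.get(listing_url).text, "lxml")
    # Level 1: the search results link to project document pages,
    # not to the pdf files themselves (assumed selector).
    pages = {urljoin(listing_url, a["href"])
             for a in listing.select("a[href*='/documents/']")}
    for page_url in pages:
        time.sleep(1)  # be polite to the server
        page = BeautifulSoup(session.get(page_url).text, "lxml")
        # Level 2: the actual pdf links on each project page.
        for a in page.select("a[href$='.pdf']"):
            pdf_url = urljoin(page_url, a["href"])
            filename = os.path.join("pdfs", pdf_url.rsplit("/", 1)[-1])
            with open(filename, "wb") as f:
                f.write(session.get(pdf_url).content)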

Try this. It will put the files into a pdfs folder.

import os
from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'download_pdf'
    allowed_domains = ["www.adb.org"]
    start_urls = [
        "https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"
    ]  # Entry page

    def __init__(self):
        Spider.__init__(self, self.name)  # necessary
        if not os.path.exists('./pdfs'):
            os.mkdir('./pdfs')

    def afterResponse(self, response, url, error=None, extra=None):
        try:
            # File name comes from the last url segment, minus any query string.
            path = './pdfs' + url[url.rindex('/'):]
            index = path.find('?')
            if index > 0: path = path[:index]
            flag = utils.saveResponseAsFile(response, path, fileType="pdf")
            if flag:
                return None
            else:  # If it's not a pdf, leave it to the framework
                return Spider.afterResponse(self, response, url, error)
        except Exception as err:
            print(err)

    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        # On the listing pages, follow the entries of the document list;
        # on the document pages that selector matches nothing, so fall
        # back to the block that holds the actual download links.
        lst = doc.selects('div.list >a').contains("documents/", attr="href")
        if not lst:
            lst = doc.selects('div.hidden-md hidden-lg >a')
        urls = []
        for a in lst:
            a["url"] = utils.absoluteUrl(url.url, a["href"])
            urls.append(a)

        return {"Urls": urls}


SimplifiedMain.startThread(MySpider())  # Start download
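
In short: extract collects the links to follow next (first the entries in the document list, then the download link on each sub-page), while afterResponse intercepts every response and saves it under ./pdfs when utils.saveResponseAsFile recognizes it as a pdf; anything else is handed back to the framework so that its links get extracted in turn.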

This version downloads the pdfs for each url into a separate folder.

from simplified_scrapy import Spider, SimplifiedDoc, SimplifiedMain, utils

class MySpider(Spider):
    name = 'download_pdf'
    allowed_domains = ["www.adb.org"]
    start_urls = [
        "https://www.adb.org/projects/documents/country/ban/year/2020?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2019?terms=education",
        "https://www.adb.org/projects/documents/country/ban/year/2018?terms=education"
    ]  # Entry page

    def afterResponse(self, response, url, error=None, extra=None):
        if not extra:
            print ("The version of library simplified_scrapy is too old, please update.")
            SimplifiedMain.setRunFlag(False)
            return
        try:
            path = './pdfs'
            # Create a per-year folder: pull the year out of the originating
            # search url, giving paths like './pdfs2020'.
            srcUrl = extra.get('srcUrl')
            if srcUrl:
                index = srcUrl.find('year/')
                year = ''
                if index > 0:
                    year = srcUrl[index + 5:]
                    index = year.find('?')
                    if index > 0:
                        path = path + year[:index]
                        utils.createDir(path)

            # File name comes from the last url segment, minus any query string.
            path = path + url[url.rindex('/'):]
            index = path.find('?')
            if index > 0: path = path[:index]
            flag = utils.saveResponseAsFile(response, path, fileType="pdf")
            if flag:
                return None
            else:  # If it's not a pdf, leave it to the framework
                return Spider.afterResponse(self, response, url, error, extra)
        except Exception as err:
            print(err)

    def extract(self, url, html, models, modelNames):
        doc = SimplifiedDoc(html)
        lst = doc.selects('div.list >a').contains("documents/", attr="href")
        if not lst:
            lst = doc.selects('div.hidden-md hidden-lg >a')
        urls = []
        for a in lst:
            a["url"] = utils.absoluteUrl(url.url, a["href"])
            # Pass the originating search url along so that afterResponse
            # can sort each file into the right year folder.
            a["srcUrl"] = url.get('srcUrl')
            if not a['srcUrl']:
                a["srcUrl"] = url.url
            urls.append(a)

        return {"Urls": urls}

    # Reset the crawled-url records. Call this when you want to download everything again.
    def resetUrl(self):
        Spider.clearUrl(self)
        Spider.resetUrlsTest(self)

SimplifiedMain.startThread(MySpider())  # Start download
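
Both spiders are started with SimplifiedMain.startThread(MySpider()). Judging by resetUrl above, simplified_scrapy keeps a record of urls it has already crawled between runs, so call resetUrl() first whenever you want to re-download everything from scratch.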