How to use the Python aiohttp library to download multiple webpages?

I'm trying to asynchronously scrape data from a leaderboard for a video game. There are weekly and daily challenges. I've based my code so far on this async client with semaphores. The difference is that I'm trying to put the fetch loop inside a function and return its results at the end. Here is the relevant part of my code:

from urllib.parse import urljoin
import asyncio
import aiohttp

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()


async def bound_fetch(url, session, sem):
    async with sem:
        await fetch(url, session)

async def fetch_pages(url, pages, session):
    tasks = []
    sem = asyncio.Semaphore(LIMIT)

    for page in range(pages + 1):
        task_url = urljoin(url, str(page))
        task = asyncio.ensure_future(bound_fetch(task_url, session, sem))
        tasks.append(task)

    await asyncio.gather(*tasks)

def leaderboard_crawler(date, entries=0, pages=1):
    website = "https://www.thronebutt.com/archive/"
    date_url = urljoin(website, date + "/")
    entries_per_page = 30
    number_of_entries = entries or pages * entries_per_page
    full_pages, last_page = divmod(number_of_entries, entries_per_page)
    entry_list = [30 for x in range(full_pages)]
    if last_page != 0:
        entry_list.append(last_page)

    loop = asyncio.get_event_loop()
    with aiohttp.ClientSession() as session:
        future = asyncio.ensure_future(fetch_pages(date_url, pages, session))
        date_html = loop.run_until_complete(future)

    return date_html

def weekly_leaderboard(week, year, entries=0, pages=1):
    weekly_date = "{0:02d}{1}".format(week, year)
    return leaderboard_crawler(weekly_date, entries, pages)

def daily_leaderboard(day, month, year, entries=0, pages=1):
    daily_date = "{0:02d}{1:02d}{2}".format(day, month, year)
    return leaderboard_crawler(daily_date, entries, pages)

I think the problem is in the asyncio.gather(*tasks) part of the fetch_pages function. I don't know how to pass its result back to leaderboard_crawler; right now date_html is None. I've tried return await asyncio.gather(*tasks), which returns an array of Nones. I've also tried wrapping it in asyncio.ensure_future and passing that to loop.run_until_complete, but that doesn't seem to work either.

The reason is simple: return statements are missing from your call stack. A coroutine with no explicit return evaluates to None, so each value gets dropped on its way back up, and date_html ends up as None:

async def bound_fetch(url, session, sem):
    async with sem:
        # await fetch(url, session)  # missing return
        return await fetch(url, session)  # this one is right

async def fetch_pages(url, pages, session):
    tasks = []
    sem = asyncio.Semaphore(LIMIT)  # LIMIT is your concurrency cap

    for page in range(pages + 1):
        task_url = urljoin(url, str(page))
        task = asyncio.ensure_future(bound_fetch(task_url, session, sem))
        tasks.append(task)

    # await asyncio.gather(*tasks)  # missing return
    return await asyncio.gather(*tasks)  # this one is right
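
This is also why you got an array of Nones when you only added return await asyncio.gather(*tasks) in fetch_pages: gather collects whatever each awaited task evaluates to, and bound_fetch was still evaluating to None. A minimal, self-contained demonstration of this behavior (the coroutine names here are made up for illustration):

import asyncio

async def no_return():
    await asyncio.sleep(0)  # does its work, but returns nothing

async def with_return():
    await asyncio.sleep(0)
    return "result"

loop = asyncio.get_event_loop()
results = loop.run_until_complete(asyncio.gather(no_return(), with_return()))
print(results)  # [None, 'result']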

A full working example:

from urllib.parse import urljoin
import asyncio
import aiohttp

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()


async def bound_fetch(url, session, sem):
    async with sem:
        return await fetch(url, session)

async def fetch_pages(url, pages, session):
    tasks = []
    sem = asyncio.Semaphore(5)

    for page in range(pages + 1):
        task_url = urljoin(url, str(page))
        task = asyncio.ensure_future(bound_fetch(task_url, session, sem))
        tasks.append(task)

    return await asyncio.gather(*tasks)

def leaderboard_crawler(date, entries=0, pages=1):
    website = "https://www.thronebutt.com/archive/"
    date_url = urljoin(website, date + "/")
    entries_per_page = 30
    number_of_entries = entries or pages * entries_per_page
    full_pages, last_page = divmod(number_of_entries, entries_per_page)
    entry_list = [30 for x in range(full_pages)]
    if last_page != 0:
        entry_list.append(last_page)

    loop = asyncio.get_event_loop()
    with aiohttp.ClientSession() as session:
        future = asyncio.ensure_future(fetch_pages(date_url, pages, session))
        date_html = loop.run_until_complete(future)

    return date_html

def weekly_leaderboard(week, year, entries=0, pages=1):
    weekly_date = "{0:02d}{1}".format(week, year)
    return leaderboard_crawler(weekly_date, entries, pages)

def daily_leaderboard(day, month, year, entries=0, pages=1):
    daily_date = "{0:02d}{1:02d}{2}".format(day, month, year)
    return leaderboard_crawler(daily_date, entries, pages)
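
With the returns in place, leaderboard_crawler returns whatever gather produced: a list of raw HTML byte strings, one per fetched page, ready to be parsed. Example usage (the week and year here are just placeholders):

pages_html = weekly_leaderboard(10, 2017, pages=2)
for html in pages_html:
    print(len(html))  # size in bytes of each page's HTML

One caveat: the plain with aiohttp.ClientSession() in leaderboard_crawler relies on older aiohttp behavior; aiohttp 3.x refuses the synchronous with and requires the session to be created inside a coroutine via async with. A minimal sketch of the entry point adapted accordingly, assuming Python 3.7+ for asyncio.run and keeping fetch_pages unchanged:

async def crawl(date_url, pages):
    # the session now lives inside the running event loop, as aiohttp 3.x requires
    async with aiohttp.ClientSession() as session:
        return await fetch_pages(date_url, pages, session)

def leaderboard_crawler(date, entries=0, pages=1):
    website = "https://www.thronebutt.com/archive/"
    date_url = urljoin(website, date + "/")
    return asyncio.run(crawl(date_url, pages))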