Couldn't import library into AWS Lambda
I'm trying to create an AWS Lambda function in Python 3.7, but when I test the function I get the following error -
{
"errorMessage": "Unable to import module 'lambda_function': No module named 's3fs'",
"errorType": "Runtime.ImportModuleError"
}
I understand that the default Python runtime doesn't include the s3fs module, so I installed s3fs and a few other packages on my local machine and zipped them. For zipping I used the following command.
Compress-Archive -Path dateutil, docutils, jmespath, s3fs, s3transfer, six.py `
 -DestinationPath ..\..\..\pinpoint-importer.zip ;
(I followed this documentation - https://docs.aws.amazon.com/pinpoint/latest/developerguide/tutorials-importing-data-create-python-package.html.)
I then uploaded the .zip file in Lambda and double-checked the handler - "lambda_function.lambda_handler".
Please find the code below.
import os
import boto3
import s3fs
from botocore.exceptions import ClientError

input_archive_folder = "input_archive"
to_process_folder = "to_process"
file_row_limit = 50
file_delimiter = ','

# S3 bucket info
s3 = s3fs.S3FileSystem(anon=False)

def lambda_handler(event, context):
    print("Received event: \n" + str(event))
    for record in event['Records']:
        # Assign some variables that make it easier to work with the data in the
        # event record.
        bucket = record['s3']['bucket']['name']
        key = record['s3']['object']['key']
        input_file = os.path.join(bucket, key)
        archive_path = os.path.join(bucket, input_archive_folder, os.path.basename(key))
        folder = os.path.split(key)[0]
        s3_url = os.path.join(bucket, folder)
        output_file_template = os.path.splitext(os.path.basename(key))[0] + "__part"
        output_path = os.path.join(bucket, to_process_folder)
        # Set a variable that contains the number of files that this Lambda
        # function creates after it runs.
        num_files = file_count(s3.open(input_file, 'r'), file_delimiter, file_row_limit)
        # Split the input file into several files, each with 50 rows.
        split(s3.open(input_file, 'r'), file_delimiter, file_row_limit, output_file_template, output_path, True, num_files)
        # Send the unchanged input file to an archive folder.
        archive(input_file, archive_path)

# Determine the number of files that this Lambda function will create.
def file_count(file_handler, delimiter, row_limit):
    import csv
    reader = csv.reader(file_handler, delimiter=delimiter)
    # Figure out the number of files this function will generate.
    row_count = sum(1 for row in reader) - 1
    # If there's a remainder, always round up.
    file_count = int(row_count // row_limit) + (row_count % row_limit > 0)
    return file_count

# Split the input into several smaller files.
def split(filehandler, delimiter, row_limit, output_name_template, output_path, keep_headers, num_files):
    import csv
    reader = csv.reader(filehandler, delimiter=delimiter)
    current_piece = 1
    current_out_path = os.path.join(
        output_path,
        output_name_template + str(current_piece) + "__of" + str(num_files) + ".csv"
    )
    current_out_writer = csv.writer(s3.open(current_out_path, 'w'), delimiter=delimiter)
    current_limit = row_limit
    if keep_headers:
        headers = next(reader)
        current_out_writer.writerow(headers)
    for i, row in enumerate(reader):
        if i + 1 > current_limit:
            current_piece += 1
            current_limit = row_limit * current_piece
            current_out_path = os.path.join(
                output_path,
                output_name_template + str(current_piece) + "__of" + str(num_files) + ".csv"
            )
            current_out_writer = csv.writer(s3.open(current_out_path, 'w'), delimiter=delimiter)
            if keep_headers:
                current_out_writer.writerow(headers)
        current_out_writer.writerow(row)

# Move the original input file into an archive folder.
def archive(input_file, archive_path):
    s3.copy_basic(input_file, archive_path)
    print("Moved " + input_file + " to " + archive_path)
    s3.rm(input_file)
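For completeness, the function is triggered by S3 events, so I test it in the console with an event shaped roughly like the one below (the bucket and key names are made-up placeholders, not my real ones); the import error above is raised before the handler body ever runs.

# Minimal S3-style test event for the Lambda console; bucket and key
# names are illustrative placeholders.
test_event = {
    "Records": [
        {
            "s3": {
                "bucket": {"name": "example-pinpoint-bucket"},
                "object": {"key": "input/endpoints.csv"}
            }
        }
    ]
}

# Locally, the handler could be exercised the same way the console test does:
# lambda_handler(test_event, None)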
A screenshot -
Please let me know if I'm missing anything. Thanks.
Well, I found a way to solve this, although I'm still not clear on the root cause.
I pasted the code (available in the AWS documentation) into a directory on my local system and zipped the entire directory, rather than only the libraries specified in step 8 of the AWS doc (https://docs.aws.amazon.com/pinpoint/latest/developerguide/tutorials-importing-data-create-python-package.html), together with the edited lambda_function.py.
I then uploaded the zip to S3 and pulled it into the Lambda function's code section.
It worked!
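In other words, the zip ends up with lambda_function.py and every installed package folder at its root. As a rough sketch of the zipping step that worked for me (run from inside that directory; the output path is illustrative):

# Zip the *entire* directory contents (lambda_function.py plus every
# package folder that was installed), not just the libraries listed in
# step 8 of the tutorial. The destination path is illustrative.
Compress-Archive -Path .\* -DestinationPath ..\pinpoint-importer.zip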
I'd ask reviewers to test this documentation if possible: https://docs.aws.amazon.com/pinpoint/latest/developerguide/tutorials-importing-data-create-python-package.html. If I've missed something or the documentation needs a correction, please let me know. Thanks a lot!
It works if you add all of the modules shown in the error messages to the zip file and upload them manually in one go. I think AWS needs to be notified, because their guide doesn't say anything about this kind of error.
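As a side note (my own addition, not from the AWS guide): rather than chasing each "No module named ..." error one module at a time, you can let pip resolve the whole dependency tree into the packaging folder before zipping, for example:

# Install s3fs together with all of its transitive dependencies directly
# into the current packaging folder; "." is an illustrative target path.
pip install s3fs -t .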