如何防止 moto 测试抛出 NoSuchBucket 错误?
How to prevent a moto test from throwing a NoSuchBucket error?
我正在尝试编写一个测试来验证 register_extracts_by_location 是否能够从 s3 存储桶中读取并抓取文件。写moto mock测试时,报错bucket不存在
这是 register_extracts_by_location 方法:
class ProcessTracker:
    # ... other methods and init here.

    def register_extracts_by_location(self, location_path, location_name=None):
        """
        For a given location, find all files and attempt to register them.

        :param location_path: Path of the location; either ``s3://bucket`` or a
            local directory path.
        :param location_name: Optional name of the location.
        :return: None
        """
        location = LocationTracker(location_path=location_path, location_name=location_name)

        if location.location_type.location_type_name == "s3":
            s3 = boto3.resource("s3")
            path = location.location_path
            if path.startswith("s3://"):
                # BUG FIX: the original used path[len("s3://")] (no colon),
                # which indexes a single character -- 't' for
                # 's3://test_bucket' -- so boto3 looked up a nonexistent
                # one-letter bucket. That is what raised NoSuchBucket.
                path = path[len("s3://"):]
            bucket = s3.Bucket(path)
            for file in bucket.objects.all():
                # NOTE(review): `file` here is an s3.ObjectSummary, not a
                # string -- presumably ExtractTracker stringifies it or reads
                # .key; confirm, otherwise pass file.key explicitly.
                ExtractTracker(process_run=self,
                               filename=file,
                               location=location,
                               status='ready')
        else:
            for file in os.listdir(location_path):
                ExtractTracker(process_run=self,
                               filename=file,
                               location=location,
                               status='ready')
测试的相关部分在这里:
def test_register_extracts_by_location_s3(self):
    """
    Testing that when the location is s3, all the extracts are registered and set to 'ready' status.
    The process/extract relationship should also be set to 'ready' since that is the last status the process set
    the extracts to.
    :return:
    """
    process_status = aliased(ExtractStatus)
    extract_status = aliased(ExtractStatus)

    expected_keys = 'test_local_dir_1.csv', 'test_local_dir_2.csv'

    with moto.mock_s3():
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='test_bucket')

        for file in expected_keys:
            # BUG FIX: conn.Object(...) alone only constructs a resource
            # handle; it never creates the key in the (mocked) bucket. Call
            # put() so the objects actually exist for the code under test
            # to list.
            conn.Object('test_bucket', file).put(Body=b'')

        self.process_tracker.register_extracts_by_location(location_path='s3://test_bucket')
boto3 似乎仍在连接真实的 S3 而不是 moto 的 mock,但我现在不确定。收到的错误是:
botocore.errorfactory.NoSuchBucket: An error occurred (NoSuchBucket) when calling the ListObjects operation: The specified bucket does not exist
我能够通过创建模拟 s3 存储桶然后在测试中进一步使用它来解决问题。这是我认为按预期工作的完整测试:
def test_register_extracts_by_location_s3(self):
    """
    Testing that when the location is s3, all the extracts are registered and set to 'ready' status.
    The process/extract relationship should also be set to 'ready' since that is the last status the process set
    the extracts to.

    NOTE(review): assumes moto's S3 mock is already active (e.g. via a
    class-level decorator or setUp) -- confirm, otherwise this would talk
    to real AWS.
    :return:
    """
    process_status = aliased(ExtractStatus)
    extract_status = aliased(ExtractStatus)

    test_bucket = "test_bucket"
    expected_keys = ["test_local_dir_1.csv", "test_local_dir_2.csv"]

    client = boto3.client(
        "s3",
        region_name="us-east-1",
        aws_access_key_id="fake_access_key",
        aws_secret_access_key="fake_secret_key",
    )

    # Safety guard: if the bucket already exists, the mock is probably not
    # active and we would be operating against real S3 -- abort.
    try:
        s3 = boto3.resource(
            "s3",
            region_name="us-east-1",
            aws_access_key_id="fake_access_key",
            aws_secret_access_key="fake_secret_key",
        )
        s3.meta.client.head_bucket(Bucket=test_bucket)
    except botocore.exceptions.ClientError:
        pass
    else:
        # EnvironmentError is a legacy alias of OSError in Python 3.
        raise OSError("%s should not exist" % test_bucket)

    client.create_bucket(Bucket=test_bucket)

    current_dir = os.path.dirname(__file__)
    fixtures_dir = os.path.join(current_dir, "fixtures")

    for file in expected_keys:
        # BUG FIX: build the S3 key with an explicit "/" -- os.path.join
        # would emit backslashes on Windows and break the expected_result
        # comparison below. Debug print() calls removed.
        key = "%s/%s" % (test_bucket, file)
        client.upload_file(
            Filename=os.path.join(fixtures_dir, file),
            Bucket=test_bucket,
            Key=key,
        )

    self.process_tracker.register_extracts_by_location(
        location_path="s3://test_bucket"
    )

    # BUG FIX: both aliased columns were named extract_status_name, making
    # row attribute access ambiguous; label them so each status is read
    # from the correct join.
    extracts = (
        self.session.query(
            Extract.extract_filename,
            extract_status.extract_status_name.label("extract_status"),
            process_status.extract_status_name.label("process_status"),
        )
        .join(
            ExtractProcess, Extract.extract_id == ExtractProcess.extract_tracking_id
        )
        .join(
            extract_status,
            Extract.extract_status_id == extract_status.extract_status_id,
        )
        .join(
            process_status,
            ExtractProcess.extract_process_status_id
            == process_status.extract_status_id,
        )
        .filter(
            ExtractProcess.process_tracking_id
            == self.process_tracker.process_tracking_run.process_tracking_id
        )
    )

    given_result = [
        [
            extract.extract_filename,
            extract.extract_status,
            extract.process_status,
        ]
        for extract in extracts
    ]

    expected_result = [
        ["test_bucket/test_local_dir_1.csv", "ready", "ready"],
        ["test_bucket/test_local_dir_2.csv", "ready", "ready"],
    ]

    self.assertCountEqual(expected_result, given_result)
我正在尝试编写一个测试来验证 register_extracts_by_location 是否能够从 s3 存储桶中读取并抓取文件。写moto mock测试时,报错bucket不存在
这是 register_extracts_by_location 方法:
class ProcessTracker:
    # ... other methods and init here.

    def register_extracts_by_location(self, location_path, location_name=None):
        """
        For a given location, find all files and attempt to register them.

        :param location_path: Path of the location; either ``s3://bucket`` or a
            local directory path.
        :param location_name: Optional name of the location.
        :return: None
        """
        location = LocationTracker(location_path=location_path, location_name=location_name)

        if location.location_type.location_type_name == "s3":
            s3 = boto3.resource("s3")
            path = location.location_path
            if path.startswith("s3://"):
                # BUG FIX: the original used path[len("s3://")] (no colon),
                # which indexes a single character -- 't' for
                # 's3://test_bucket' -- so boto3 looked up a nonexistent
                # one-letter bucket. That is what raised NoSuchBucket.
                path = path[len("s3://"):]
            bucket = s3.Bucket(path)
            for file in bucket.objects.all():
                # NOTE(review): `file` here is an s3.ObjectSummary, not a
                # string -- presumably ExtractTracker stringifies it or reads
                # .key; confirm, otherwise pass file.key explicitly.
                ExtractTracker(process_run=self,
                               filename=file,
                               location=location,
                               status='ready')
        else:
            for file in os.listdir(location_path):
                ExtractTracker(process_run=self,
                               filename=file,
                               location=location,
                               status='ready')
测试的相关部分在这里:
def test_register_extracts_by_location_s3(self):
    """
    Testing that when the location is s3, all the extracts are registered and set to 'ready' status.
    The process/extract relationship should also be set to 'ready' since that is the last status the process set
    the extracts to.
    :return:
    """
    process_status = aliased(ExtractStatus)
    extract_status = aliased(ExtractStatus)

    expected_keys = 'test_local_dir_1.csv', 'test_local_dir_2.csv'

    with moto.mock_s3():
        conn = boto3.resource('s3', region_name='us-east-1')
        conn.create_bucket(Bucket='test_bucket')

        for file in expected_keys:
            # BUG FIX: conn.Object(...) alone only constructs a resource
            # handle; it never creates the key in the (mocked) bucket. Call
            # put() so the objects actually exist for the code under test
            # to list.
            conn.Object('test_bucket', file).put(Body=b'')

        self.process_tracker.register_extracts_by_location(location_path='s3://test_bucket')
boto3 似乎仍在连接真实的 S3 而不是 moto 的 mock,但我现在不确定。收到的错误是:
botocore.errorfactory.NoSuchBucket: An error occurred (NoSuchBucket) when calling the ListObjects operation: The specified bucket does not exist
我能够通过创建模拟 s3 存储桶然后在测试中进一步使用它来解决问题。这是我认为按预期工作的完整测试:
def test_register_extracts_by_location_s3(self):
    """
    Testing that when the location is s3, all the extracts are registered and set to 'ready' status.
    The process/extract relationship should also be set to 'ready' since that is the last status the process set
    the extracts to.

    NOTE(review): assumes moto's S3 mock is already active (e.g. via a
    class-level decorator or setUp) -- confirm, otherwise this would talk
    to real AWS.
    :return:
    """
    process_status = aliased(ExtractStatus)
    extract_status = aliased(ExtractStatus)

    test_bucket = "test_bucket"
    expected_keys = ["test_local_dir_1.csv", "test_local_dir_2.csv"]

    client = boto3.client(
        "s3",
        region_name="us-east-1",
        aws_access_key_id="fake_access_key",
        aws_secret_access_key="fake_secret_key",
    )

    # Safety guard: if the bucket already exists, the mock is probably not
    # active and we would be operating against real S3 -- abort.
    try:
        s3 = boto3.resource(
            "s3",
            region_name="us-east-1",
            aws_access_key_id="fake_access_key",
            aws_secret_access_key="fake_secret_key",
        )
        s3.meta.client.head_bucket(Bucket=test_bucket)
    except botocore.exceptions.ClientError:
        pass
    else:
        # EnvironmentError is a legacy alias of OSError in Python 3.
        raise OSError("%s should not exist" % test_bucket)

    client.create_bucket(Bucket=test_bucket)

    current_dir = os.path.dirname(__file__)
    fixtures_dir = os.path.join(current_dir, "fixtures")

    for file in expected_keys:
        # BUG FIX: build the S3 key with an explicit "/" -- os.path.join
        # would emit backslashes on Windows and break the expected_result
        # comparison below. Debug print() calls removed.
        key = "%s/%s" % (test_bucket, file)
        client.upload_file(
            Filename=os.path.join(fixtures_dir, file),
            Bucket=test_bucket,
            Key=key,
        )

    self.process_tracker.register_extracts_by_location(
        location_path="s3://test_bucket"
    )

    # BUG FIX: both aliased columns were named extract_status_name, making
    # row attribute access ambiguous; label them so each status is read
    # from the correct join.
    extracts = (
        self.session.query(
            Extract.extract_filename,
            extract_status.extract_status_name.label("extract_status"),
            process_status.extract_status_name.label("process_status"),
        )
        .join(
            ExtractProcess, Extract.extract_id == ExtractProcess.extract_tracking_id
        )
        .join(
            extract_status,
            Extract.extract_status_id == extract_status.extract_status_id,
        )
        .join(
            process_status,
            ExtractProcess.extract_process_status_id
            == process_status.extract_status_id,
        )
        .filter(
            ExtractProcess.process_tracking_id
            == self.process_tracker.process_tracking_run.process_tracking_id
        )
    )

    given_result = [
        [
            extract.extract_filename,
            extract.extract_status,
            extract.process_status,
        ]
        for extract in extracts
    ]

    expected_result = [
        ["test_bucket/test_local_dir_1.csv", "ready", "ready"],
        ["test_bucket/test_local_dir_2.csv", "ready", "ready"],
    ]

    self.assertCountEqual(expected_result, given_result)