Spring Cloud: testing S3 client with Testcontainers

I use Spring Cloud's ResourceLoader to access S3, for example:

public class S3DownUpLoader {

    private final ResourceLoader resourceLoader;

    // bucket name; in the real class this comes from configuration
    private String bucket;

    @Autowired
    public S3DownUpLoader(ResourceLoader resourceLoader) {
        this.resourceLoader = resourceLoader;
    }

    public String storeOnS3(String filename, byte[] data) throws IOException {
        String location = "s3://" + bucket + "/" + filename;
        WritableResource writableResource = (WritableResource) this.resourceLoader.getResource(location);
        FileCopyUtils.copy(data, writableResource.getOutputStream());
        return filename;
    }
}
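The download counterpart works through the same Resource abstraction; a minimal sketch (the method name getFromS3 is hypothetical, not part of the original class):

public byte[] getFromS3(String filename) throws IOException {
    String location = "s3://" + bucket + "/" + filename;
    Resource resource = this.resourceLoader.getResource(location);
    // copyToByteArray reads the stream fully and closes it
    return FileCopyUtils.copyToByteArray(resource.getInputStream());
}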

It works fine, but I need help testing this code against Localstack/Testcontainers. I tried the following test, but it does not work; my production configuration is picked up instead (the S3 client configured for localstack is never injected):

@RunWith(SpringRunner.class)
@SpringBootTest
public class S3DownUpLoaderTest {

    @ClassRule
    public static LocalStackContainer localstack = new LocalStackContainer().withServices(S3);

    @Autowired
    S3DownUpLoader s3DownUpLoader;

    @Test
    public void testA() throws IOException {
        s3DownUpLoader.storeOnS3(...);
    }

    @TestConfiguration
    @EnableContextResourceLoader
    public static class S3Configuration {

        @Primary
        @Bean(destroyMethod = "shutdown")
        public AmazonS3 amazonS3() {
            return AmazonS3ClientBuilder
                    .standard()
                    .withEndpointConfiguration(localstack.getEndpointConfiguration(S3))
                    .withCredentials(localstack.getDefaultCredentialsProvider())
                    .build();
        }
    }
}

As we discussed on GitHub, we solve this problem in a slightly different way. I have actually never seen WritableResource used the way you do, which looks interesting. Nonetheless, this is how we solve it:

@RunWith(SpringRunner.class)
@SpringBootTest(properties = "spring.profiles.active=test")
@ContextConfiguration(classes = AbstractAmazonS3Test.S3Configuration.class)
public abstract class AbstractAmazonS3Test {

    private static final String REGION = Regions.EU_WEST_1.getName();

    /**
     * Configure S3.
     */
    @TestConfiguration
    public static class S3Configuration {

        @Bean
        public AmazonS3 amazonS3() {
            //localstack docker image is running locally on port 4572 for S3
            final String serviceEndpoint = String.format("http://%s:%s", "127.0.0.1", "4572");
            return AmazonS3Client.builder()
                .withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, REGION))
                .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials("dummyKey", "dummySecret")))
                .build();
        }
    }

}

And a sample test:

public class CsvS3UploadServiceIntegrationTest extends AbstractAmazonS3Test {

    private static final String SUCCESS_CSV = "a,b";
    private static final String STANDARD_STORAGE = "STANDARD";

    @Autowired
    private AmazonS3 s3;

    @Autowired
    private S3ConfigurationProperties properties;

    @Autowired
    private CsvS3UploadService service;

    @Before
    public void setUp() {
        s3.createBucket(properties.getBucketName());
    }

    @After
    public void tearDown() {
        final String bucketName = properties.getBucketName();
        s3.listObjects(bucketName).getObjectSummaries().stream()
            .map(S3ObjectSummary::getKey)
            .forEach(key -> s3.deleteObject(bucketName, key));
        s3.deleteBucket(bucketName);
    }

    @Test
    public void uploadSuccessfulCsv() {
        service.uploadSuccessfulCsv(SUCCESS_CSV);
        final S3ObjectSummary s3ObjectSummary = getOnlyFileFromS3();
        assertThat(s3ObjectSummary.getKey(), containsString("-success.csv"));
        assertThat(s3ObjectSummary.getETag(), is("b345e1dc09f20fdefdea469f09167892"));
        assertThat(s3ObjectSummary.getStorageClass(), is(STANDARD_STORAGE));
        assertThat(s3ObjectSummary.getSize(), is(3L));
    }

    private S3ObjectSummary getOnlyFileFromS3() {
        final ObjectListing listing = s3.listObjects(properties.getBucketName());
        final List<S3ObjectSummary> objects = listing.getObjectSummaries();
        assertThat(objects, iterableWithSize(1));
        return Iterables.getOnlyElement(objects);
    }
}
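The S3ConfigurationProperties bean autowired above is not shown in the post; a minimal sketch, assuming a single bucketName field bound from configuration (the prefix and property name are my assumptions):

@ConfigurationProperties(prefix = "s3")
public class S3ConfigurationProperties {

    // name of the bucket the service uploads to (assumed property: s3.bucket-name)
    private String bucketName;

    public String getBucketName() {
        return bucketName;
    }

    public void setBucketName(String bucketName) {
        this.bucketName = bucketName;
    }
}

Note that it is registered via @EnableConfigurationProperties(S3ConfigurationProperties.class) on the service shown below, so no @Component annotation is needed.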

And the code under test:

@Service
@RequiredArgsConstructor
@EnableConfigurationProperties(S3ConfigurationProperties.class)
public class CsvS3UploadServiceImpl implements CsvS3UploadService {

    private static final String CSV_MIME_TYPE = CSV_UTF_8.toString();

    private final AmazonS3 amazonS3;
    private final S3ConfigurationProperties properties;
    private final S3ObjectKeyService s3ObjectKeyService;

    @Override
    public void uploadSuccessfulCsv(final String source) {
        final String key = s3ObjectKeyService.getSuccessKey();
        doUpload(source, key, getObjectMetadata(source));
    }

    private void doUpload(final String source, final String key, final ObjectMetadata metadata) {
        try (ReaderInputStream in = new ReaderInputStream(new StringReader(source), UTF_8)) {
            final PutObjectRequest request = new PutObjectRequest(properties.getBucketName(), key, in, metadata);
            amazonS3.putObject(request);
        } catch (final IOException ioe) {
            throw new CsvUploadException("Unable to upload " + key, ioe);
        }
    }

    private ObjectMetadata getObjectMetadata(final String source) {
        final ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentType(CSV_MIME_TYPE);
        metadata.setContentLength(source.getBytes(UTF_8).length);
        metadata.setContentMD5(getMD5ChecksumAsBase64(source));
        metadata.setSSEAlgorithm(SSEAlgorithm.KMS.getAlgorithm());
        return metadata;
    }

    private String getMD5ChecksumAsBase64(final String source) {
        final HashCode md5 = Hashing.md5().hashString(source, UTF_8);
        return Base64.getEncoder().encodeToString(md5.asBytes());
    }
}
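The S3ObjectKeyService and CsvUploadException types referenced above are likewise not shown; minimal sketches, with the key format inferred from the "-success.csv" assertion in the test (the exact format is an assumption):

public interface S3ObjectKeyService {

    // e.g. "20190101120000-success.csv"; the timestamp prefix is assumed
    String getSuccessKey();
}

public class CsvUploadException extends RuntimeException {

    public CsvUploadException(final String message, final Throwable cause) {
        super(message, cause);
    }
}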

It seems the only way to provide the ResourceLoader with a custom amazonS3 bean is to inject it manually. The test looks like this:

@RunWith(SpringRunner.class)
@SpringBootTest
@ContextConfiguration(classes = S3DownUpLoaderTest.S3Configuration.class)
public class S3DownUpLoaderTest implements ApplicationContextAware {

    private static final String BUCKET_NAME = "bucket";

    @ClassRule
    public static LocalStackContainer localstack = new LocalStackContainer().withServices(S3);

    @Autowired
    S3DownUpLoader s3DownUpLoader;

    @Autowired
    SimpleStorageProtocolResolver resourceLoader;

    @Autowired
    AmazonS3 amazonS3;

    @Before
    public void setUp() {
        amazonS3.createBucket(BUCKET_NAME);
    }

    @Test
    public void someTestA() throws IOException {
        ....
    }

    @After
    public void tearDown() {
        ObjectListing objectListing = amazonS3.listObjects(BUCKET_NAME);
        while (true) {
            for (S3ObjectSummary summary : objectListing.getObjectSummaries()) {
                amazonS3.deleteObject(BUCKET_NAME, summary.getKey());
            }

            // more objects to retrieve?
            if (objectListing.isTruncated()) {
                objectListing = amazonS3.listNextBatchOfObjects(objectListing);
            } else {
                break;
            }
        }

        amazonS3.deleteBucket(BUCKET_NAME);
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        // register the s3:// protocol resolver manually so the test context
        // resolves s3:// locations against the localstack-backed client
        if (applicationContext instanceof ConfigurableApplicationContext) {
            ConfigurableApplicationContext configurableApplicationContext = (ConfigurableApplicationContext) applicationContext;
            configurableApplicationContext.addProtocolResolver(this.resourceLoader);
        }
    }

    public static class S3Configuration {

        @Bean
        public S3DownUpLoader s3DownUpLoader(ResourceLoader resourceLoader) {
            return new S3DownUpLoader(resourceLoader);
        }

        @Bean(destroyMethod = "shutdown")
        public AmazonS3 amazonS3() {
            return AmazonS3ClientBuilder
                    .standard()
                    .withEndpointConfiguration(localstack.getEndpointConfiguration(S3))
                    .withCredentials(localstack.getDefaultCredentialsProvider())
                    .build();
        }

        @Bean
        public SimpleStorageProtocolResolver resourceLoader() {
            return new SimpleStorageProtocolResolver(amazonS3());
        }
    }
}
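With the protocol resolver registered, a test body can exercise the loader end to end; a minimal sketch, assuming the loader's bucket is configured to BUCKET_NAME (the file name and payload are made up):

@Test
public void storesFileOnS3() throws IOException {
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

    s3DownUpLoader.storeOnS3("greeting.txt", payload);

    // verify through the plain client that the object landed in the bucket
    S3Object object = amazonS3.getObject(BUCKET_NAME, "greeting.txt");
    assertArrayEquals(payload, FileCopyUtils.copyToByteArray(object.getObjectContent()));
}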