AWS ECS is not linking my containers
I'm migrating part of the infrastructure of the company I work for to Amazon ECS, and I've been trying to get my server container to connect to the database container.
Here is how I set it up in the task:
{
  "requiresAttributes": [
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.docker-remote-api.1.17",
      "targetId": null,
      "targetType": null
    },
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.logging-driver.syslog",
      "targetId": null,
      "targetType": null
    },
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.docker-remote-api.1.18",
      "targetId": null,
      "targetType": null
    },
    {
      "value": null,
      "name": "com.amazonaws.ecs.capability.ecr-auth",
      "targetId": null,
      "targetType": null
    }
  ],
  "taskDefinitionArn": "arn:aws:ecs:us-east-1:9621234232917455:task-definition/ecv-server:12",
  "networkMode": "bridge",
  "status": "ACTIVE",
  "revision": 12,
  "taskRoleArn": null,
  "containerDefinitions": [
    {
      "volumesFrom": [],
      "memory": 500,
      "extraHosts": null,
      "dnsServers": [],
      "disableNetworking": null,
      "dnsSearchDomains": null,
      "portMappings": [],
      "hostname": "db",
      "essential": true,
      "entryPoint": null,
      "mountPoints": [
        {
          "containerPath": "/var/lib/postgresql/data",
          "sourceVolume": "dbdata",
          "readOnly": null
        }
      ],
      "name": "db",
      "ulimits": null,
      "dockerSecurityOptions": null,
      "environment": [
        {
          "name": "POSTGRES_PASSWORD",
          "value": "jmbrito"
        },
        {
          "name": "POSTGRES_USER",
          "value": "jmbrito"
        }
      ],
      "links": [],
      "workingDirectory": null,
      "readonlyRootFilesystem": null,
      "image": "postgres",
      "command": null,
      "user": null,
      "dockerLabels": null,
      "logConfiguration": {
        "logDriver": "syslog",
        "options": null
      },
      "cpu": 0,
      "privileged": null,
      "memoryReservation": null
    },
    {
      "volumesFrom": [],
      "memory": 400,
      "extraHosts": null,
      "dnsServers": [],
      "disableNetworking": null,
      "dnsSearchDomains": null,
      "portMappings": [],
      "hostname": "redis",
      "essential": true,
      "entryPoint": null,
      "mountPoints": [
        {
          "containerPath": "/data",
          "sourceVolume": "redisdata",
          "readOnly": null
        }
      ],
      "name": "redis",
      "ulimits": null,
      "dockerSecurityOptions": null,
      "environment": [],
      "links": null,
      "workingDirectory": null,
      "readonlyRootFilesystem": null,
      "image": "redis:3.2-alpine",
      "command": [
        "redis-server"
      ],
      "user": null,
      "dockerLabels": null,
      "logConfiguration": {
        "logDriver": "syslog",
        "options": null
      },
      "cpu": 0,
      "privileged": null,
      "memoryReservation": null
    },
    {
      "volumesFrom": [],
      "memory": 600,
      "extraHosts": null,
      "dnsServers": null,
      "disableNetworking": null,
      "dnsSearchDomains": null,
      "portMappings": [
        {
          "hostPort": 80,
          "containerPort": 3000,
          "protocol": "tcp"
        }
      ],
      "hostname": null,
      "essential": true,
      "entryPoint": [],
      "mountPoints": [],
      "name": "server",
      "ulimits": null,
      "dockerSecurityOptions": null,
      "environment": [
        {
          "name": "RAILS_ENV",
          "value": "production"
        }
      ],
      "links": [
        "db:db",
        "redis:redis"
      ],
      "workingDirectory": "/usr/src/app",
      "readonlyRootFilesystem": null,
      "image": "MY DOCKER LINK IN ECR",
      "command": [
        "sh",
        "deploy/init.sh"
      ],
      "user": null,
      "dockerLabels": null,
      "logConfiguration": {
        "logDriver": "syslog",
        "options": null
      },
      "cpu": 0,
      "privileged": null,
      "memoryReservation": null
    }
  ],
  "placementConstraints": [],
  "volumes": [
    {
      "host": {
        "sourcePath": null
      },
      "name": "dbdata"
    },
    {
      "host": {
        "sourcePath": null
      },
      "name": "redisdata"
    }
  ],
  "family": "ecv-server"
}
As you can see, I set my links field correctly, but when I try to connect using those names (for instance to host db or host redis), it can't find them.
I tried connecting to the other containers using their VPC addresses and that worked. The only problem is that I would rather set the addresses in the VPC myself (for example, assigning 172.13.0.2 to db), because when I don't, the addresses get assigned in whatever order the containers come up.
I hope my problem is clear.
Thank you.
I don't think this task definition will work the way you want it to. Putting three containers in a single task definition tells ECS to always deploy those three containers together on the same machine every time the task is deployed.
So if you run a service for this task with a desired count of three, you will get three application containers, three postgres containers, and three redis containers, and those three application containers will sit on three independent persistence stacks: application container A will only talk to postgres A and redis A, while application container B will only talk to postgres B and redis B, so each application container ends up with inconsistent data that is not replicated to the others.
Running multiple containers in one task definition is really only appropriate for sidecar containers such as a reverse proxy, an ephemeral cache, or something similar.
For the persistence layer, my advice here is to use Amazon RDS for your postgres and Amazon ElastiCache for your redis. That way all of your tasks can share the same postgres and the same redis, and with those managed Amazon services you will also have far less to administer yourself.
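For illustration, here is a minimal sketch of what the server container definition could look like once it points at managed endpoints instead of container links. The DATABASE_URL and REDIS_URL variable names and the <...> values are placeholders I'm inventing for the example (they are not from your setup), and this assumes your Rails app reads its connection settings from the environment:

{
  "name": "server",
  "image": "MY DOCKER LINK IN ECR",
  "essential": true,
  "memory": 600,
  "portMappings": [
    { "hostPort": 80, "containerPort": 3000, "protocol": "tcp" }
  ],
  "environment": [
    { "name": "RAILS_ENV", "value": "production" },
    { "name": "DATABASE_URL", "value": "postgres://<user>:<password>@<your-rds-endpoint>:5432/<dbname>" },
    { "name": "REDIS_URL", "value": "redis://<your-elasticache-endpoint>:6379/0" }
  ],
  "workingDirectory": "/usr/src/app",
  "command": ["sh", "deploy/init.sh"]
}

With this shape there is no links section at all, the db and redis containers (and their host volumes) drop out of the task definition, and the security groups on the RDS instance and the ElastiCache cluster control which tasks are allowed to reach them.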