How to write a Python script as an Amazon Lambda function?
I want to turn the Python script below into an AWS Lambda function. The script publishes RabbitMQ metrics to Amazon CloudWatch. I have tried several times and managed to read the RabbitMQ queue depths, but my Lambda function fails to publish the metrics to CloudWatch.
from __future__ import with_statement, print_function
from pyrabbit.api import Client
import boto3
import os

host = ""
username = ""
password = ""
vhost = ""
namespace = ""


def get_queue_depths(host, username, password, vhost):
    # Connect to the RabbitMQ management API and collect the depth of each queue.
    cl = Client(host, username, password)
    if not cl.is_alive():
        raise Exception("Failed to connect to rabbitmq")
    depths = {}
    queues = [q['name'] for q in cl.get_queues(vhost=vhost)]
    for queue in queues:
        if queue == "aliveness-test":
            continue
        if 'celery' in queue:
            continue
        depths[queue] = cl.get_queue_depth(vhost, queue)
    return depths


def publish_queue_depth_to_cloudwatch(cwc, queue_name, depth, namespace):
    # Publish a single queue depth as a custom CloudWatch metric.
    depth = float(depth)
    cwc.put_metric_data(
        Namespace=namespace,
        MetricData=[{'MetricName': queue_name, 'Value': depth, 'Unit': 'Count'}]
    )
    print("Putting metric namespace=%s name=%s unit=Count value=%f" %
          (namespace, queue_name, depth))


def publish_depths_to_cloudwatch(depths, namespace):
    # Create the CloudWatch client once and reuse it for every queue.
    cwc = boto3.client('cloudwatch', region_name="us-east-1")
    for queue in depths:
        publish_queue_depth_to_cloudwatch(cwc, queue, depths[queue], namespace)


def get_queue_depths_and_publish_to_cloudwatch(host, username, password, vhost, namespace):
    depths = get_queue_depths(host, username, password, vhost)
    publish_depths_to_cloudwatch(depths, namespace)


if __name__ == "__main__":
    while True:
        get_queue_depths_and_publish_to_cloudwatch(host, username, password, vhost, namespace)
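For reference, here is a minimal sketch of what the Lambda entry point could look like, assuming the helper functions above are packaged in the same module and the connection settings are supplied as environment variables on the function (the variable names below are illustrative, not part of the original script). The infinite while-loop is dropped, because Lambda invokes the handler on a schedule instead, e.g. from a CloudWatch Events / EventBridge rule.

import os

def lambda_handler(event, context):
    # Hypothetical environment variable names; configure them on the function.
    host = os.environ['RABBITMQ_HOST']
    username = os.environ['RABBITMQ_USER']
    password = os.environ['RABBITMQ_PASSWORD']
    vhost = os.environ['RABBITMQ_VHOST']
    namespace = os.environ['CW_NAMESPACE']
    # Reuse the helpers defined above in the same file.
    get_queue_depths_and_publish_to_cloudwatch(host, username, password, vhost, namespace)
    return {"status": "ok"}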
Solved by adding a NAT gateway to the VPC so the Lambda function can reach AWS resources, as Mark B suggested in the comments.
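For completeness, a rough boto3 sketch of that networking fix (all resource IDs below are placeholders, and the same steps can be done in the VPC console): create a NAT gateway in a public subnet, then point the default route of the private subnet the Lambda runs in at it.

import boto3

ec2 = boto3.client('ec2', region_name='us-east-1')

# Create a NAT gateway in a public subnet, using an existing Elastic IP allocation.
nat = ec2.create_nat_gateway(SubnetId='subnet-PUBLIC', AllocationId='eipalloc-EXAMPLE')
nat_id = nat['NatGateway']['NatGatewayId']
ec2.get_waiter('nat_gateway_available').wait(NatGatewayIds=[nat_id])

# Route outbound traffic from the Lambda's private subnet through the NAT gateway.
ec2.create_route(RouteTableId='rtb-PRIVATE',
                 DestinationCidrBlock='0.0.0.0/0',
                 NatGatewayId=nat_id)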