AWS Glue: Filter date field
I have a very basic question. I created an AWS Glue job, and I need to apply a filter while pulling data from a DynamoDB table. I only need to pull the previous day's data, using a field named "time".
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
import re
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
DataSource0 = glueContext.create_dynamic_frame.from_catalog(database = "sampledb", table_name = "tablex", transformation_ctx = "DataSource0")
Transform1 = ApplyMapping.apply(frame = DataSource0, mappings = [("allresults.paymentCapabilityResult.paymentCapabilityCheckResult", "boolean", "allresults.paymentCapabilityResult.paymentCapabilityCheckResult", "boolean"), ("time", "string", "time", "timestamp")], transformation_ctx = "Transform1")
DataSink0 = glueContext.write_dynamic_frame.from_options(frame = Transform1, connection_type = "s3", format = "json", connection_options = {"path": "s3://xxxxx/output/", "partitionKeys": []}, transformation_ctx = "DataSink0")
job.commit()
Filtering for yesterday can be done easily in PySpark, as discussed. The script looks like this:
from awsglue.dynamicframe import DynamicFrame
from pyspark.sql import functions as F

df = Transform1.toDF()
# Compare only the date part of "time" with yesterday's date; comparing the
# raw timestamp to a date would only match rows stamped exactly at midnight.
df = df.where(F.to_date(F.col("time")) == F.date_sub(F.current_date(), 1))
Transform2 = DynamicFrame.fromDF(df, glue_ctx=glueContext, name="Transform2")
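Remember to pass the filtered frame (Transform2) instead of Transform1 to write_dynamic_frame, otherwise the sink still writes the unfiltered data.

Alternatively, you can stay in the DynamicFrame API and use the Filter transform that is already imported via awsglue.transforms. Below is a minimal sketch, assuming the DynamoDB "time" attribute is an ISO-8601 string (e.g. "2024-01-15T10:23:45Z") so that a simple date prefix selects one day; the name Filtered0 is just for illustration, and the filter runs against DataSource0 before ApplyMapping, so the mapping step would then take the filtered frame as its input.

from datetime import datetime, timedelta

# Yesterday's date as a "YYYY-MM-DD" prefix (assumes the data is in UTC).
yesterday = (datetime.utcnow() - timedelta(days=1)).strftime("%Y-%m-%d")

# Keep only records whose "time" string starts with yesterday's date.
Filtered0 = Filter.apply(
    frame = DataSource0,
    f = lambda row: row["time"] is not None and row["time"].startswith(yesterday),
    transformation_ctx = "Filtered0"
)

# Downstream, feed the filtered frame into the mapping step, e.g.:
# Transform1 = ApplyMapping.apply(frame = Filtered0, mappings = [...], transformation_ctx = "Transform1")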