How to process a subset of input records in a batch, i.e. the first second in 3-sec batch time?

If I set the batch interval to Seconds(1) in the StreamingContext, like this:

val ssc = new StreamingContext(sc, Seconds(1))

Over 3 seconds it will receive 3 seconds of data, but I only need the first second's data and can discard the following 2 seconds. So can I spend 3 seconds processing only the first second's data?

You can do this with updateStateByKey if you keep track of a counter, for example as below:

import org.apache.spark.SparkContext
import org.apache.spark.streaming.dstream.ConstantInputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

object StreamEveryThirdApp {

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local[*]", "Streaming Test")
    implicit val ssc = new StreamingContext(sc, Seconds(1))
    ssc.checkpoint("./checkpoint")

    // generate stream
    val inputDStream = createConstantStream

    // increase seconds counter
    val accStream = inputDStream.updateStateByKey(updateState)

    // keep only the 1st-second records
    val firstOfThree = accStream.filter { case (key, (value, counter)) => counter == 1 }

    firstOfThree.print()

    ssc.start()
    ssc.awaitTermination()

  }

  def updateState: (Seq[Int], Option[(Option[Int], Int)]) => Option[(Option[Int], Int)] = {
    case (values, state) =>
      state match {
        // No previous state: this is the first second, so keep the summed values
        case None => Some((Some(values.sum), 1))
        // The previous batch was the 2nd second, so this is the 3rd: remove the
        // state so that the next batch starts a new cycle as a fresh 1st second
        case Some((prevValue, 2)) => None
        // Not the first second: increase the seconds counter without keeping values
        case Some((prevValue, counter)) => Some((None, counter + 1))
      }
  }

  def createConstantStream(implicit ssc: StreamingContext): ConstantInputDStream[(String, Int)] = {
    val seq = Seq(
      ("key1", 1),
      ("key2", 3),
      ("key1", 2),
      ("key1", 2)
    )
    val rdd = ssc.sparkContext.parallelize(seq)
    val inputDStream = new ConstantInputDStream(ssc, rdd)
    inputDStream
  }
}
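
Note that updateStateByKey requires checkpointing to be enabled, which is why the example calls ssc.checkpoint. With the 1-second batch interval, the per-key counter advances once per batch and the state is removed on the third second, so the counter == 1 filter lets through only every third batch.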

If you have time information within your data, you could also use a 3-second window, stream.window(Seconds(3), Seconds(3)), and filter the records based on the time information carried in the data; usually this is the preferred approach.
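
As a rough sketch of that window approach, assuming each record is a (timestamp, value) pair keyed by an event-time epoch in milliseconds; the firstSecondOnly helper and the (Long, String) record type are illustrative assumptions, not from the original answer:

import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.dstream.DStream

// Hypothetical helper: keep only the records that fall in the first second
// of each non-overlapping 3-second window
def firstSecondOnly(stream: DStream[(Long, String)]): DStream[(Long, String)] = {
  stream
    .window(Seconds(3), Seconds(3)) // group the 1-sec batches into 3-sec windows
    .transform { rdd =>
      if (rdd.isEmpty()) rdd
      else {
        // use the earliest event time in the window as the window start
        // (min() is an action that runs once per window)
        val windowStart = rdd.keys.min()
        rdd.filter { case (ts, _) => ts < windowStart + 1000L }
      }
    }
}

Because the filter uses timestamps carried in the data rather than batch arrival time, the result does not depend on when records happen to reach Spark, which is one reason this approach is usually preferred when such timestamps are available.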