Error when using Spark Structured Streaming with Kafka to read data and write it to another topic


I'm working on a small task: I read an access_logs file through a Kafka topic, count the statuses, and send the status counts to another Kafka topic. But I keep running into errors. When I use no output mode, or append mode, I get:

Exception in thread "main" org.apache.spark.sql.AnalysisException: Append output mode not supported when there are streaming aggregations on streaming DataFrames/DataSets without watermark;;

And when I use complete mode:

Exception in thread "main" org.apache.spark.sql.streaming.StreamingQueryException: requirement failed: KafkaTable does not support Complete mode.

Here is my code: structuredStreaming.scala

package com.spark.sparkstreaming

import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.sql._
import org.apache.log4j._
import org.apache.spark.sql.functions._

import java.util.regex.Pattern
import java.util.regex.Matcher
import java.text.SimpleDateFormat
import java.util.Locale

import Utilities._
object structuredStreaming {

  case class LogEntry(ip:String, client:String, user:String, dateTime:String, request:String, status:String, bytes:String, referer:String, agent:String)

  val logPattern = apacheLogPattern()
  val datePattern = Pattern.compile("\\[(.*?) .+]")

  def parseDateField(field: String): Option[String] = {

    val dateMatcher = datePattern.matcher(field)
    if (dateMatcher.find) {
      val dateString = dateMatcher.group(1)
      val dateFormat = new SimpleDateFormat("dd/MMM/yyyy:HH:mm:ss", Locale.ENGLISH)
      val date = (dateFormat.parse(dateString))
      val timestamp = new java.sql.Timestamp(date.getTime());
      return Option(timestamp.toString())
    } else {
      None
    }
  }

  def parseLog(x:Row) : Option[LogEntry] = {

    val matcher:Matcher = logPattern.matcher(x.getString(0));
    if (matcher.matches()) {
      val timeString = matcher.group(4)
      return Some(LogEntry(
        matcher.group(1),
        matcher.group(2),
        matcher.group(3),
        parseDateField(matcher.group(4)).getOrElse(""),
        matcher.group(5),
        matcher.group(6),
        matcher.group(7),
        matcher.group(8),
        matcher.group(9)
      ))
    } else {
      return None
    }
  }

  def main(args: Array[String]) {

    val spark = SparkSession
      .builder
      .appName("StructuredStreaming")
      .master("local[*]")
      .config("spark.sql.streaming.checkpointLocation", "/home/UDHAV.MAHATA/Documents/Checkpoints")
      .getOrCreate()

    setupLogging()

//    val rawData = spark.readStream.text("/home/UDHAV.MAHATA/Documents/Spark/logs")
    val rawData = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("subscribe", "testing")
      .load()
    import spark.implicits._

    val structuredData = rawData.flatMap(parseLog).select("status")
    val windowed = structuredData.groupBy($"status").count()
    //val query = windowed.writeStream.outputMode("complete").format("console").start()
    val query = windowed
      .writeStream
        .outputMode("complete")
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("topic", "sink")
      .start()
    query.awaitTermination()
    spark.stop()
  }

}

Utilities.scala

package com.spark.sparkstreaming

import org.apache.log4j.Level
import java.util.regex.Pattern
import java.util.regex.Matcher

object Utilities {
  def setupLogging() = {
    import org.apache.log4j.{Level, Logger}
    val rootLogger = Logger.getRootLogger()
    rootLogger.setLevel(Level.ERROR)
  }
 def apacheLogPattern():Pattern = {
    val ddd = "\\d{1,3}"
    val ip = s"($ddd\\.$ddd\\.$ddd\\.$ddd)?"
    val client = "(\\S+)"
    val user = "(\\S+)"
    val dateTime = "(\\[.+?\\])"
    val request = "\"(.*?)\""
    val status = "(\\d{3})"
    val bytes = "(\\S+)"
    val referer = "\"(.*?)\""
    val agent = "\"(.*?)\""
    val regex = s"$ip $client $user $dateTime $request $status $bytes $referer $agent"
    Pattern.compile(regex)
  }
}

Can anyone help me figure out where I'm going wrong?

scala apache-spark apache-kafka spark-streaming spark-structured-streaming
1 Answer

As the error message suggests, you need to add a watermark to your grouping.

Replace this line:

val windowed = structuredData.groupBy($"status").count()

with:

import org.apache.spark.sql.functions.{window, col}

val windowed = structuredData
  .withWatermark("dateTime", "10 minutes")
  .groupBy(window(col("dateTime"), "10 minutes"), col("status"))
  .count()

It is important that the dateTime column is of timestamp type, which, if I understood correctly, is what you parse out of the Kafka source.
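
If it is still a string in your pipeline, here is a minimal sketch of one way to get such a column, assuming the Kafka value payload is first cast to a string so parseLog can read it, and that both dateTime and status are kept in the selection (your original select only keeps status):

import org.apache.spark.sql.functions.to_timestamp

// Cast the binary Kafka 'value' column to a string so parseLog can read it,
// then turn the parsed date string into a real TimestampType column that the
// watermark and window can operate on.
val structuredData = rawData
  .selectExpr("CAST(value AS STRING)")
  .flatMap(parseLog)
  .select(to_timestamp($"dateTime").as("dateTime"), $"status")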

Without such a window (and the watermark), Spark would not know how much data it needs to aggregate before it can emit a result.
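
For completeness, a sketch of what the write side could then look like under the same assumptions. The Kafka sink only accepts a value column (and an optional key), so the counts need to be serialized to strings first; once the watermark is in place, append mode becomes legal:

// Serialize the aggregated counts into the key/value shape the Kafka sink
// expects, then write in append mode (allowed now that a watermark is set).
val query = windowed
  .selectExpr("CAST(status AS STRING) AS key", "CAST(`count` AS STRING) AS value")
  .writeStream
  .outputMode("append")
  .format("kafka")
  .option("kafka.bootstrap.servers", "localhost:9092")
  .option("topic", "sink")
  .start()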
