Apache Zeppelin and Spark: "Scala module 2.13.3 requires Jackson Databind version >= 2.13.0 and < 2.14.0 - Found jackson-databind version 2.12.0"


I'm running Apache Zeppelin 0.10.1 with Spark 3.3.0 on Docker, and I keep getting this error:

com.fasterxml.jackson.databind.JsonMappingException: Scala module 2.13.3 requires Jackson Databind version >= 2.13.0 and < 2.14.0 - Found jackson-databind version 2.12.0
  at com.fasterxml.jackson.module.scala.JacksonModule.setupModule(JacksonModule.scala:61)
  at com.fasterxml.jackson.module.scala.JacksonModule.setupModule$(JacksonModule.scala:46)
  at com.fasterxml.jackson.module.scala.DefaultScalaModule.setupModule(DefaultScalaModule.scala:17)
  at com.fasterxml.jackson.databind.ObjectMapper.registerModule(ObjectMapper.java:835)
  at com.fasterxml.jackson.databind.cfg.MapperBuilder.addModule(MapperBuilder.java:243)
  at org.apache.spark.SparkThrowableHelper$.<init>(ErrorInfo.scala:53)
  at org.apache.spark.SparkThrowableHelper$.<clinit>(ErrorInfo.scala)
  at org.apache.spark.sql.AnalysisException.<init>(AnalysisException.scala:44)
  at org.apache.spark.sql.AnalysisException.<init>(AnalysisException.scala:50)
  at org.apache.spark.sql.errors.QueryCompilationErrors$.upCastFailureError(QueryCompilationErrors.scala:171)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$.org$apache$spark$sql$catalyst$analysis$Analyzer$ResolveUpCast$$fail(Analyzer.scala:3621)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$$anonfun$apply$60$$anonfun$applyOrElse$183.applyOrElse(Analyzer.scala:3652)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$$anonfun$apply$60$$anonfun$applyOrElse$183.applyOrElse(Analyzer.scala:3629)
  at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:584)
  at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:176)
  at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:584)
  at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$3(TreeNode.scala:589)
  at org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren(TreeNode.scala:1228)
  at org.apache.spark.sql.catalyst.trees.UnaryLike.mapChildren$(TreeNode.scala:1227)
  at org.apache.spark.sql.catalyst.expressions.UnaryExpression.mapChildren(Expression.scala:513)
  at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:589)
  at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$3(TreeNode.scala:589)
  at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)
  at scala.collection.Iterator.foreach(Iterator.scala:943)
  at scala.collection.Iterator.foreach$(Iterator.scala:943)
  at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
  at scala.collection.IterableLike.foreach(IterableLike.scala:74)
  at scala.collection.IterableLike.foreach$(IterableLike.scala:73)
  at scala.collection.AbstractIterable.foreach(Iterable.scala:56)
  at scala.collection.TraversableLike.map(TraversableLike.scala:286)
  at scala.collection.TraversableLike.map$(TraversableLike.scala:279)
  at scala.collection.AbstractTraversable.map(Traversable.scala:108)
  at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:698)
  at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:589)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$transformExpressionsDownWithPruning$1(QueryPlan.scala:159)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$1(QueryPlan.scala:200)
  at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:176)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:200)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.recursiveTransform$1(QueryPlan.scala:211)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.$anonfun$mapExpressions$4(QueryPlan.scala:221)
  at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:427)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:221)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpressionsDownWithPruning(QueryPlan.scala:159)
  at org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpressionsWithPruning(QueryPlan.scala:130)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$$anonfun$apply$60.applyOrElse(Analyzer.scala:3629)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$$anonfun$apply$60.applyOrElse(Analyzer.scala:3625)
  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$3(AnalysisHelper.scala:138)
  at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:176)
  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.$anonfun$resolveOperatorsUpWithPruning$1(AnalysisHelper.scala:138)
  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.allowInvokingTransformsInAnalyzer(AnalysisHelper.scala:323)
  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning(AnalysisHelper.scala:134)
  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.resolveOperatorsUpWithPruning$(AnalysisHelper.scala:130)
  at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperatorsUpWithPruning(LogicalPlan.scala:30)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$.apply(Analyzer.scala:3625)
  at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveUpCast$.apply(Analyzer.scala:3615)
  at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:211)
  at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
  at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
  at scala.collection.immutable.List.foldLeft(List.scala:91)
  at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:208)
  at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:200)
  at scala.collection.immutable.List.foreach(List.scala:431)
  at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:200)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:227)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:223)
  at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:172)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:223)
  at org.apache.spark.sql.catalyst.encoders.ExpressionEncoder.resolveAndBind(ExpressionEncoder.scala:345)
  at org.apache.spark.sql.Dataset.resolvedEnc$lzycompute(Dataset.scala:240)
  at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$resolvedEnc(Dataset.scala:239)
  at org.apache.spark.sql.Dataset$.apply(Dataset.scala:83)
  at org.apache.spark.sql.Dataset.as(Dataset.scala:465)
  ... 44 elided

whenever I try to run this:

val mysqlURL = "jdbc:mysql://<url>:3306/<table>"

// Read the "products" table from MySQL over JDBC
val pdf = spark.read.format("jdbc")
  .option("driver", "com.mysql.cj.jdbc.Driver")
  .option("url", mysqlURL)
  .option("dbtable", "products")
  .option("user", "...")
  .option("password", "...")
  .load()

val pdfCombinations = pdf.crossJoin(pdf).dropDuplicates()

// The JsonMappingException above is thrown here
pdfCombinations.describe()

On some other pages I've seen that I should check for jackson-databind dependency conflicts, but I can't seem to find any. The code only breaks when I try to run an action. I'm using all the default configurations for the Spark interpreter in Zeppelin, and I'm running the stock Zeppelin image from Docker.
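
For reference, this is how I've been checking which jackson-databind the interpreter actually loads; a minimal sketch using plain JVM reflection, run from a Zeppelin Spark paragraph:

// Inspect the jackson-databind class that is actually on the classpath
val databindClass = classOf[com.fasterxml.jackson.databind.ObjectMapper]

// Implementation-Version from the jar manifest (may be null for some jars)
println("jackson-databind version: " + databindClass.getPackage.getImplementationVersion)

// Which jar the class was loaded from (getCodeSource can be null for bootstrap classes)
println("loaded from: " + databindClass.getProtectionDomain.getCodeSource.getLocation)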

Tags: docker, scala, apache-spark, apache-spark-sql, apache-zeppelin
1 Answer

Spark 3 built for Scala 2.13 is only supported in Zeppelin starting with version 0.11. The jackson-databind 2.12.0 found on the classpath presumably comes from Zeppelin 0.10.1's own dependencies, which clash with the jackson-module-scala 2.13.3 that Spark 3.3.0 bundles, so the fix is to upgrade Zeppelin to 0.11 or later.
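
On Docker, that means replacing the stock 0.10.1 image with a 0.11.x one, for example (tag assumed here; check Docker Hub for the current 0.11.x release):

docker run -p 8080:8080 apache/zeppelin:0.11.2

After the upgrade, the same paragraph should pass Jackson's version check, since the interpreter classpath no longer carries the older jackson-databind.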
