Spark MLlib libsvm data problem

Problem description (votes: 1, answers: 2)

I'm trying to run the Scala version of the example at http://spark.apache.org/docs/1.2.1/mllib-linear-methods.html. The demo works fine as-is, but as soon as I change the data and train on it, training fails.
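For context, the training step follows the pattern from that page. Here is a rough sketch of what I am running (trimmed from the docs example; the data path is a placeholder for my own file):

import org.apache.spark.mllib.classification.SVMWithSGD
import org.apache.spark.mllib.util.MLUtils

// Load training data in libsvm format, then train a linear SVM with SGD,
// as in the linear-methods guide. "data/my_data.libsvm" stands in for my file.
val data = MLUtils.loadLibSVMFile(sc, "data/my_data.libsvm")
val numIterations = 100
val model = SVMWithSGD.train(data, numIterations)

With my data the job aborts with the following: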

15/05/05 16:32:02 INFO TaskSetManager: Starting task 0.0 in stage 12.0 (TID 21, localhost, PROCESS_LOCAL, 1447 bytes)
15/05/05 16:32:02 INFO TaskSetManager: Starting task 1.0 in stage 12.0 (TID 22, localhost, PROCESS_LOCAL, 1447 bytes)
15/05/05 16:32:02 INFO Executor: Running task 0.0 in stage 12.0 (TID 21)
15/05/05 16:32:02 INFO Executor: Running task 1.0 in stage 12.0 (TID 22)
15/05/05 16:32:02 INFO BlockManager: Found block rdd_7_1 locally
15/05/05 16:32:02 ERROR Executor: Exception in task 1.0 in stage 12.0 (TID 22)
java.lang.ArrayIndexOutOfBoundsException: -1
    at org.apache.spark.mllib.linalg.BLAS$.dot(BLAS.scala:136)
    at org.apache.spark.mllib.linalg.BLAS$.dot(BLAS.scala:106)
    at org.apache.spark.mllib.optimization.HingeGradient.compute(Gradient.scala:313)
    at org.apache.spark.mllib.optimization.GradientDescent$$anonfun$runMiniBatchSGD$1$$anonfun$1.apply(GradientDescent.scala:192)
    at org.apache.spark.mllib.optimization.GradientDescent$$anonfun$runMiniBatchSGD$1$$anonfun$1.apply(GradientDescent.scala:190)
    at scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:144)
    at scala.collection.TraversableOnce$$anonfun$foldLeft$1.apply(TraversableOnce.scala:144)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.foldLeft(TraversableOnce.scala:144)
    at scala.collection.AbstractIterator.foldLeft(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.aggregate(TraversableOnce.scala:201)
    at scala.collection.AbstractIterator.aggregate(Iterator.scala:1157)
    at org.apache.spark.rdd.RDD$$anonfun$28.apply(RDD.scala:988)
    at org.apache.spark.rdd.RDD$$anonfun$28.apply(RDD.scala:988)
    at org.apache.spark.rdd.RDD$$anonfun$29.apply(RDD.scala:989)
    at org.apache.spark.rdd.RDD$$anonfun$29.apply(RDD.scala:989)
    at org.apache.spark.rdd.RDD$$anonfun$14.apply(RDD.scala:634)
    at org.apache.spark.rdd.RDD$$anonfun$14.apply(RDD.scala:634)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
    at org.apache.spark.scheduler.Task.run(Task.scala:64)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
15/05/05 16:32:02 INFO BlockManager: Found block rdd_7_0 locally
15/05/05 16:32:02 ERROR Executor: Exception in task 0.0 in stage 12.0 (TID 21)
java.lang.ArrayIndexOutOfBoundsException: -1
    ... (same stack trace as above)
15/05/05 16:32:02 WARN TaskSetManager: Lost task 1.0 in stage 12.0 (TID 22, localhost): java.lang.ArrayIndexOutOfBoundsException: -1
    ... (same stack trace as above)

15/05/05 16:32:02 ERROR TaskSetManager: Task 1 in stage 12.0 failed 1 times; aborting job
15/05/05 16:32:02 INFO TaskSchedulerImpl: Removed TaskSet 12.0, whose tasks have all completed, from pool 
15/05/05 16:32:02 INFO TaskSetManager: Lost task 0.0 in stage 12.0 (TID 21) on executor localhost: java.lang.ArrayIndexOutOfBoundsException (-1) [duplicate 1]
15/05/05 16:32:02 INFO TaskSchedulerImpl: Removed TaskSet 12.0, whose tasks have all completed, from pool 
15/05/05 16:32:02 INFO TaskSchedulerImpl: Cancelling stage 12
15/05/05 16:32:02 INFO DAGScheduler: Job 12 failed: treeAggregate at GradientDescent.scala:189, took 0.032101 s
org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 12.0 failed 1 times, most recent failure: Lost task 1.0 in stage 12.0 (TID 22, localhost): java.lang.ArrayIndexOutOfBoundsException: -1
    ... (same stack trace as above)

Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1203)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1192)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1191)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1191)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:693)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:693)
    at scala.Option.foreach(Option.scala:236)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:693)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1393)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1354)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)

My test data file is here: https://github.com/hermitD/temp. I used it to train with libsvm-tools on Linux and it works! The libsvm Python tool's format check also says the file is OK. I just don't know why Spark errors out.

scala libsvm apache-spark-mllib
2 Answers
1 vote

After some testing I finally solved it, and I'm writing it down here for anyone else who runs into this problem. Here is an example of the badly formatted data I had:

0 0:0 1:0 2:1
1 1:1 3:2

The entries like 0:0 and 1:0 / 1:1 are the cause of the ArrayIndexOutOfBoundsException. If you hit the same problem, just delete those entries from your data or re-index it. Since the file works with libsvm-tools, I assume Spark MLlib's implementation is just slightly different: it expects feature indices to start at 1, not 0.
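Digging a bit deeper: Spark MLlib's libsvm loader assumes one-based feature indices and converts them to zero-based while parsing. A simplified sketch of that conversion (not the literal MLUtils source) shows how an index of 0 ends up as -1:

// Simplified view of how MLlib parses a libsvm record such as "0 0:0 1:0 2:1".
// Indices are treated as one-based and shifted down by one, so the entry
// "0:0" yields array index -1, which later blows up inside BLAS.dot.
val line = "0 0:0 1:0 2:1"
val items = line.split(' ')
val label = items.head.toDouble
val (indices, values) = items.tail.map { item =>
  val Array(i, v) = item.split(':')
  (i.toInt - 1, v.toDouble) // "0:..." becomes index -1
}.unzip
println(indices.mkString(", ")) // prints: -1, 0, 1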


0 votes

I ran into the same problem with the libSVM format and MLlib. In my case the first feature was indexed 0 instead of 1. XGBoost had no problem with it, but Weka and Spark MLlib both failed with the same ArrayIndexOutOfBoundsException: -1.

In this case the solution is to add 1 to every feature index so that indices start at 1 instead of 0. The easiest way to do that in Python is:

from sklearn.datasets import load_svmlight_file, dump_svmlight_file

# Load the libsvm/svmlight file (the indices are auto-detected as zero-based
# because the file contains a feature index of 0).
X, y = load_svmlight_file('example.libsvm')

# Shift every stored column index up by one so features start at 1,
# then write the corrected file back out.
X.indices = X.indices + 1
dump_svmlight_file(X, y, 'fixed.libsvm')
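Note that scikit-learn can also do the shift for you: pass zero_based=True to load_svmlight_file and zero_based=False to dump_svmlight_file, and it will write one-based indices without you having to touch X.indices directly.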