Spark ML KMeans gives: org.apache.spark.SparkException: Failed to execute user defined function($anonfun$2: (vector) => int)
I am trying to load a KMeansModel and then get labels from it. Here is the code I wrote:
import org.apache.spark.ml.clustering.KMeansModel
import org.apache.spark.ml.feature.VectorAssembler

// Load the previously trained model from disk
val kMeansModel = KMeansModel.load(trainedMlModel.mlModelFilePath)

// Turn the measurement values into a DataFrame (one row per element)
val arrayOfElements = measurePoint.measurements.map(a => a._2).toSeq
println(s"ArrayOfElements::::$arrayOfElements")
val arrayDF = sparkContext.parallelize(arrayOfElements).toDF()
arrayDF.show()

// Assemble all columns into one "features" vector per row
val vectorDF = new VectorAssembler()
  .setInputCols(arrayDF.columns)
  .setOutputCol("features")
  .transform(arrayDF)
vectorDF.printSchema()
vectorDF.show()

// Predict a cluster label for every row, then pull the first one back to the driver
val loadedModel = kMeansModel.setFeaturesCol("features").setPredictionCol("label")
val labelDF = loadedModel.transform(vectorDF)
labelDF.printSchema()
labelDF.show()
val label = labelDF.rdd.map(_.getAs[Int]("label")).collect().head
Here is the stack trace of the error it produces:
org.apache.spark.SparkException: Failed to execute user defined function($anonfun: (vector) => int)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$$anon.hasNext(WholeStageCodegenExec.scala:377)
at scala.collection.Iterator$$anon.hasNext(Iterator.scala:408)
at scala.collection.Iterator$$anon.hasNext(Iterator.scala:408)
at scala.collection.Iterator$$anon.hasNext(Iterator.scala:408)
at scala.collection.Iterator$class.foreach(Iterator.scala:893)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104)
at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48)
at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:310)
at scala.collection.AbstractIterator.to(Iterator.scala:1336)
at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:302)
at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1336)
at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:289)
at scala.collection.AbstractIterator.toArray(Iterator.scala:1336)
at org.apache.spark.rdd.RDD$$anonfun$collect$$anonfun.apply(RDD.scala:935)
at org.apache.spark.rdd.RDD$$anonfun$collect$$anonfun.apply(RDD.scala:935)
at org.apache.spark.SparkContext$$anonfun$runJob.apply(SparkContext.scala:1944)
at org.apache.spark.SparkContext$$anonfun$runJob.apply(SparkContext.scala:1944)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
at org.apache.spark.scheduler.Task.run(Task.scala:99)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:282)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: requirement failed
at scala.Predef$.require(Predef.scala:212)
at org.apache.spark.mllib.util.MLUtils$.fastSquaredDistance(MLUtils.scala:486)
at org.apache.spark.mllib.clustering.KMeans$.fastSquaredDistance(KMeans.scala:589)
at org.apache.spark.mllib.clustering.KMeans$$anonfun$findClosest.apply(KMeans.scala:563)
at org.apache.spark.mllib.clustering.KMeans$$anonfun$findClosest.apply(KMeans.scala:557)
at scala.collection.mutable.ArraySeq.foreach(ArraySeq.scala:74)
at org.apache.spark.mllib.clustering.KMeans$.findClosest(KMeans.scala:557)
at org.apache.spark.mllib.clustering.KMeansModel.predict(KMeansModel.scala:59)
at org.apache.spark.ml.clustering.KMeansModel.predict(KMeans.scala:134)
at org.apache.spark.ml.clustering.KMeansModel$$anonfun.apply(KMeans.scala:125)
at org.apache.spark.ml.clustering.KMeansModel$$anonfun.apply(KMeans.scala:125)
... 27 more
17/03/08 23:11:41 WARN TaskSetManager: Lost task 3.0 in stage 26.0 (TID 45, localhost, executor driver): org.apache.spark.SparkException: Failed to execute user defined function($anonfun: (vector) => int)
This means there is a dimension mismatch between the new data and the data used to train the model. Your code fails at MLUtils.scala:486, which checks that the two Vectors have the same size:
private[mllib] def fastSquaredDistance(
    v1: Vector,
    norm1: Double,
    v2: Vector,
    norm2: Double,
    precision: Double = 1e-6): Double = {
  val n = v1.size
  require(v2.size == n)
  ...
}
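In your case the likely culprit is that parallelize(arrayOfElements).toDF() creates one row per measurement, so the assembled features vectors may be one-dimensional while the model was trained on wider vectors. A quick way to confirm this is to compare the size of the model's cluster centers (which equals the training dimensionality) with the size of the vectors you actually feed in. The sketch below is only a diagnostic, reusing kMeansModel and vectorDF from your code:

import org.apache.spark.ml.linalg.Vector

// Every cluster center has the same dimensionality as the training data
val expectedDim = kMeansModel.clusterCenters.head.size

// Dimensionality of the vectors produced by the VectorAssembler above
val actualDim = vectorDF.select("features").head().getAs[Vector]("features").size

// Fail fast with a readable message instead of a UDF exception inside transform()
require(actualDim == expectedDim,
  s"Model expects $expectedDim-dimensional features but got $actualDim; " +
    "assemble the new data with the same columns, in the same order, as the training data.")

If the two sizes differ, fix how the input DataFrame is built (one row per observation, one column per feature) rather than the model itself.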