Spark version mismatch using maven dependencies

I want to run a simple wordcount example using Apache Spark. Running it against the local jar files in $SPARK_HOME/jars works fine, but when I use Maven dependencies it fails with this error:

java.lang.NoSuchMethodError: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
at org.apache.spark.deploy.SparkHadoopUtil$$anonfun$$anonfun$apply$mcJ$sp.apply(SparkHadoopUtil.scala:149)
at org.apache.spark.deploy.SparkHadoopUtil$$anonfun$$anonfun$apply$mcJ$sp.apply(SparkHadoopUtil.scala:149)
at scala.collection.TraversableLike$$anonfun$map.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map.apply(TraversableLike.scala:234)
at scala.collection.Iterator$class.foreach(Iterator.scala:893)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at org.apache.spark.deploy.SparkHadoopUtil$$anonfun.apply$mcJ$sp(SparkHadoopUtil.scala:149)
at org.apache.spark.deploy.SparkHadoopUtil.getFSBytesReadOnThreadCallback(SparkHadoopUtil.scala:150)
at org.apache.spark.rdd.HadoopRDD$$anon.<init>(HadoopRDD.scala:224)
at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:203)
at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:94)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:96)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:53)
at org.apache.spark.scheduler.Task.run(Task.scala:108)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)

Here is the code:

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;
import scala.Tuple2;

import java.util.Arrays;

public class SparkTest {
    public static void main(String[] args){
        // Run locally with two threads.
        SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("SparkTest");
        JavaSparkContext sc = new JavaSparkContext(conf);

        // Classic word count: split each line into words, map each word to (word, 1),
        // then sum the counts per word.
        JavaRDD<String> rdd = sc.textFile("file:///usr/local/spark/LICENSE");
        JavaPairRDD<String, Integer> counts = rdd
                .flatMap(s -> Arrays.asList(s.split(" ")).iterator())
                .mapToPair(word -> new Tuple2<>(word, 1))
                .reduceByKey((a, b) -> a + b);

        // Collapse to a single partition so the result is written as one output file.
        counts.coalesce(1).saveAsTextFile("file:///home/XXX/Desktop/Processing/spark");

    }
}

And here is the pom.xml file:

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>Processing</groupId>
    <artifactId>Streaming</artifactId>
    <version>1.0-SNAPSHOT</version>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <configuration>
                    <source>1.8</source>
                    <target>1.8</target>
                </configuration>
            </plugin>
        </plugins>
    </build>
    <dependencies>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>1.3.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.11</artifactId>
            <version>1.3.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_2.11</artifactId>
            <version>1.3.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.10.0.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka-0.10_2.11</artifactId>
            <version>1.3.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.3</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-filesystem_2.11</artifactId>
            <version>1.3.2</version>
        </dependency>
    </dependencies>
</project>

It also includes some other Apache software, such as Hadoop and Flink.

Installed Spark version: 2.2.0, downloaded from https://www.apache.org/dyn/closer.lua/spark/spark-2.2.0/spark-2.2.0-bin-hadoop2.7.tgz

Installed Hadoop version: 2.7.3

Something must be mismatched here!

In the end I created a separate Maven project dedicated to Spark with just the spark-core dependency.

Can anyone explain why this happens?

Using your dependencies and checking how Java loads your class with org.apache.hadoop.fs.FileSystem.class.getResource("FileSystem.class"), it appears your jar is loaded from org.apache.flink:flink-shaded-hadoop2:jar:1.3.2. Displaying the dependency tree with mvn dependency:tree shows that it is a transitive dependency of both flink-java and flink-streaming-java_2.11:

[INFO] +- org.apache.flink:flink-java:jar:1.3.2:compile
[INFO] |  +- ...
[INFO] |  +- org.apache.flink:flink-shaded-hadoop2:jar:1.3.2:compile
[INFO] +- org.apache.flink:flink-streaming-java_2.11:jar:1.3.2:compile
[INFO] |  +- org.apache.flink:flink-runtime_2.11:jar:1.3.2:compile
[INFO] |  |  +- org.apache.flink:flink-shaded-hadoop2:jar:1.3.2:compile
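
If you want to see for yourself which jar a class is resolved from at runtime, a minimal check along these lines can be dropped into any main method (a sketch based on the getResource call mentioned above; the class name WhichJar is just for illustration):

import org.apache.hadoop.fs.FileSystem;

public class WhichJar {
    public static void main(String[] args) {
        // Prints the URL of the resource backing FileSystem.class, which contains the
        // path of the jar it was loaded from. In this setup it points into
        // flink-shaded-hadoop2-1.3.2.jar rather than the expected hadoop-client jars.
        System.out.println(FileSystem.class.getResource("FileSystem.class"));
    }
}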

This jar contains the entire org.apache.hadoop.fs package, overriding the correct definitions and causing your problem. You can try removing the flink-java dependency or excluding flink-shaded-hadoop2, but this may break your code, since other required Flink classes might then be missing. For example:

        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>1.3.2</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.flink</groupId>
                    <artifactId>flink-shaded-hadoop2</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.11</artifactId>
            <version>1.3.2</version>
            <exclusions>
                <exclusion>
                    <groupId>org.apache.flink</groupId>
                    <artifactId>flink-shaded-hadoop2</artifactId>
                </exclusion>
            </exclusions>
        </dependency>

Otherwise you will have to find another solution that fits your project requirements: experiment with class loading to make sure your classes are loaded correctly, update your dependency versions so that the Hadoop classes pulled in by Flink match those expected by Spark, and so on.
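
When aligning versions, a small runtime check can help confirm which Hadoop build actually wins on the classpath (a sketch, not part of the original answer; VersionCheck is just an illustrative name):

import org.apache.hadoop.util.VersionInfo;

public class VersionCheck {
    public static void main(String[] args) {
        // Reports the version of whichever Hadoop classes are resolved at runtime;
        // if it differs from the 2.7.3 declared in the pom, another dependency
        // (such as flink-shaded-hadoop2) is supplying the Hadoop classes.
        System.out.println("Hadoop version on classpath: " + VersionInfo.getVersion());
    }
}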

Starting with Flink 1.4 (yet to be released), Flink will be able to run without any Hadoop dependencies; if you do need Hadoop, having it on the classpath is enough. That should make your life easier.