Standalone HBase with Spark, HBaseTest.scala is giving error

Hi, I am using standalone HBase and I want to test Spark on top of it. I do not have Hadoop on my machine.

When I try to get the count of a table using HBaseTest.scala (from the Scala examples), I get the following error:

ERROR TableInputFormat: java.io.IOException: java.lang.reflect.InvocationTargetException
    at org.apache.hadoop.hbase.client.HConnectionManager.createConnection(HConnectionManager.java:416)
    at org.apache.hadoop.hbase.client.HConnectionManager.createConnection(HConnectionManager.java:393)
    at org.apache.hadoop.hbase.client.HConnectionManager.getConnection(HConnectionManager.java:274)
    at org.apache.hadoop.hbase.client.HTable.<init>(HTable.java:194)
    at org.apache.hadoop.hbase.client.HTable.<init>(HTable.java:156)
    at org.apache.hadoop.hbase.mapreduce.TableInputFormat.setConf(TableInputFormat.java:101)
    at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:91)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1632)
    at org.apache.spark.rdd.RDD.count(RDD.scala:1012)
    at org.apache.spark.examples.HBaseTest$.main(HBaseTest.scala:59)
    at org.apache.spark.examples.HBaseTest.main(HBaseTest.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:607)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:190)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:111)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.lang.reflect.InvocationTargetException
    at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
    at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
    at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
    at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
    at org.apache.hadoop.hbase.client.HConnectionManager.createConnection(HConnectionManager.java:414)
    ... 23 more
Caused by: java.lang.VerifyError: class org.apache.hadoop.hbase.protobuf.generated.ClientProtos$Result overrides final method getUnknownFields.()Lcom/google/protobuf/UnknownFieldSet;
    at java.lang.ClassLoader.defineClass1(Native Method)
    at java.lang.ClassLoader.defineClass(ClassLoader.java:800)
    at java.security.SecureClassLoader.defineClass(SecureClassLoader.java:142)
    at java.net.URLClassLoader.defineClass(URLClassLoader.java:449)
    at java.net.URLClassLoader.access$100(URLClassLoader.java:71)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:361)
    at java.net.URLClassLoader$1.run(URLClassLoader.java:355)
    at java.security.AccessController.doPrivileged(Native Method)
    at java.net.URLClassLoader.findClass(URLClassLoader.java:354)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:425)
    at sun.misc.Launcher$AppClassLoader.loadClass(Launcher.java:308)
    at java.lang.ClassLoader.loadClass(ClassLoader.java:358)
    at org.apache.hadoop.hbase.protobuf.ProtobufUtil.<clinit>(ProtobufUtil.java:176)
    at org.apache.hadoop.hbase.ClusterId.parseFrom(ClusterId.java:64)
    at org.apache.hadoop.hbase.zookeeper.ZKClusterId.readClusterIdZNode(ZKClusterId.java:69)
    at org.apache.hadoop.hbase.client.ZooKeeperRegistry.getClusterId(ZooKeeperRegistry.java:83)
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.retrieveClusterId(HConnectionManager.java:857)
    at org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation.<init>(HConnectionManager.java:662)
    ... 28 more

Exception in thread "main" java.io.IOException: No table was provided.
    at org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getSplits(TableInputFormatBase.java:154)
    at org.apache.spark.rdd.NewHadoopRDD.getPartitions(NewHadoopRDD.scala:95)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:219)
    at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:217)
    at scala.Option.getOrElse(Option.scala:120)
    at org.apache.spark.rdd.RDD.partitions(RDD.scala:217)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1632)
    at org.apache.spark.rdd.RDD.count(RDD.scala:1012)
    at org.apache.spark.examples.HBaseTest$.main(HBaseTest.scala:59)
    at org.apache.spark.examples.HBaseTest.main(HBaseTest.scala)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:607)
    at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:167)
    at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:190)
    at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:111)
    at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)

I cannot figure out what the problem is here. HBaseTest.scala:

import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.{HBaseConfiguration, HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.HBaseAdmin
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.spark.{SparkConf, SparkContext}

object HBaseTest {
  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("HBaseTest").setMaster("local")
    val sc = new SparkContext(sparkConf)
    val conf = HBaseConfiguration.create()
    // Other options for configuring scan behavior are available. More information available at
    // http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
    conf.set("zookeeper.znode.parent", "/hbase-unsecure")
    conf.set("hbase.zookeeper.quorum", "localhost")
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    conf.addResource(new Path("/usr/lib/hbase/hbase-0.94.8/conf/hbase-site.xml"))
    conf.set(TableInputFormat.INPUT_TABLE, "test")
    // Initialize the HBase table if necessary
    val admin = new HBaseAdmin(conf)
    if (!admin.isTableAvailable("test")) {
      println("inside if statement")
      val tableDesc = new HTableDescriptor(TableName.valueOf("test"))
      // HBase requires at least one column family, otherwise createTable fails
      tableDesc.addFamily(new HColumnDescriptor("cf"))
      admin.createTable(tableDesc)
    }
    val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
      classOf[org.apache.hadoop.hbase.io.ImmutableBytesWritable],
      classOf[org.apache.hadoop.hbase.client.Result])
    hBaseRDD.count()

    sc.stop()
  }
}
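
For reference, the table can also be checked without Spark, using only the plain HBase client API. Below is a minimal diagnostic sketch against the same configuration; the object name HBaseClientCount is mine, and the settings mirror the code above. If this also fails with the same VerifyError, the problem is in the HBase client classpath rather than in Spark:

import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.{HTable, Scan}

import scala.collection.JavaConverters._

object HBaseClientCount {
  def main(args: Array[String]) {
    // Same connection settings as in HBaseTest.scala above
    val conf = HBaseConfiguration.create()
    conf.set("zookeeper.znode.parent", "/hbase-unsecure")
    conf.set("hbase.zookeeper.quorum", "localhost")
    conf.set("hbase.zookeeper.property.clientPort", "2181")
    val table = new HTable(conf, "test")
    try {
      // Count rows by scanning the whole "test" table
      val scanner = table.getScanner(new Scan())
      try {
        println(s"row count: ${scanner.iterator().asScala.size}")
      } finally scanner.close()
    } finally table.close()
  }
}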

You are using the TableInputFormat class as the input format. TableInputFormat is part of HBase's Hadoop MapReduce integration, so it depends on the Hadoop MapReduce API. You need Hadoop installed (at least its client libraries on the classpath) in order to use TableInputFormat.
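
If the goal is just to get the Hadoop and HBase jars onto the classpath, a minimal sbt build might look like the sketch below. All version numbers are assumptions: "0.94.8" matches the path in the question, and the Spark and Hadoop versions must be aligned with your actual installation:

// build.sbt -- a minimal sketch, not a verified configuration
name := "hbase-spark-test"

scalaVersion := "2.10.4"

libraryDependencies ++= Seq(
  // Spark version is an assumption; match it to your cluster
  "org.apache.spark"  %% "spark-core"    % "1.4.1" % "provided",
  // Provides the Hadoop MapReduce API that TableInputFormat needs
  "org.apache.hadoop" %  "hadoop-client" % "1.2.1",
  // Monolithic HBase artifact for the 0.94 line (0.96+ is split into
  // hbase-client, hbase-server, hbase-protocol, etc.)
  "org.apache.hbase"  %  "hbase"         % "0.94.8"
)

Alternatively, with spark-submit the HBase jars and their dependencies can be supplied via --jars or --driver-class-path. Note also that a VerifyError about getUnknownFields(), as in the trace above, typically indicates mixed HBase/protobuf versions on the same classpath, so make sure only one consistent set of HBase jars is visible to Spark.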