java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
My setup is as follows:
Spark 1.2.0 with Hadoop 2.5.0/YARN on a Cloudera CDH5 VM (CentOS 6.2), running on a 64-bit Windows host with 8 GB RAM.
Below is the sequence of commands run from spark-shell. When I try to print the cust RDD, I get a Kerberos authentication error. I am logged into spark-shell as the cloudera user, and the Cloudera VM is Kerberos-enabled with cloudera@HADOOP.LOCALDOMAIN as the default principal.
Is there any way to authenticate with Kerberos from spark-shell so that normal RDD operations work? Or am I missing something? Any help is appreciated.
Below are the spark-shell commands:
scala> sc
res0: org.apache.spark.SparkContext = org.apache.spark.SparkContext@26226a12
scala> sqlContext
res1: org.apache.spark.sql.SQLContext = org.apache.spark.sql.SQLContext@7213fc4a
scala> import sqlContext.createSchemaRDD
import sqlContext.createSchemaRDD
scala> case class Cust_flat_xml(xmldata: String)
defined class Cust_flat_xml
Tried loading from a local file path first; as the error message below shows, the path is resolved in HDFS (see the note after the stack trace).
**scala> val cust = sc.textFile("/home/cloudera/tdaf/tdaf_xml_data/new_cust_20110630_cpy").map(_.split(" ")).map(p => Cust_flat_xml(p(0)))**
15/07/01 11:11:45 INFO MemoryStore: ensureFreeSpace(260017) called with curMem=843639, maxMem=280248975
15/07/01 11:11:45 INFO MemoryStore: Block broadcast_3 stored as values in memory (estimated size 253.9 KB, free 266.2 MB)
15/07/01 11:11:45 INFO MemoryStore: ensureFreeSpace(21212) called with curMem=1103656, maxMem=280248975
15/07/01 11:11:45 INFO MemoryStore: Block broadcast_3_piece0 stored as bytes in memory (estimated size 20.7 KB, free 266.2 MB)
15/07/01 11:11:45 INFO BlockManagerInfo: Added broadcast_3_piece0 in memory on 10.113.234.25:58467 (size: 20.7 KB, free: 267.2 MB)
15/07/01 11:11:45 INFO BlockManagerMaster: Updated info of block broadcast_3_piece0
15/07/01 11:11:45 INFO SparkContext: Created broadcast 3 from textFile at <console>:28
cust: org.apache.spark.rdd.RDD[Cust_flat_xml] = MappedRDD[9] at map at <console>:28
scala> cust.foreach(println)
15/07/01 11:12:07 INFO DFSClient: Created HDFS_DELEGATION_TOKEN token 42 for cloudera on 127.0.0.1:8020
15/07/01 11:12:07 INFO TokenCache: Got dt for hdfs://localhost.localdomain:8020; Kind: HDFS_DELEGATION_TOKEN, Service: 127.0.0.1:8020, Ident: (HDFS_DELEGATION_TOKEN token 42 for cloudera)
**org.apache.hadoop.mapred.InvalidInputException: Input path does not exist: hdfs://localhost.localdomain:8020/home/cloudera/tdaf/tdaf_xml_data/new_cust_20110630_cpy**
at org.apache.hadoop.mapred.FileInputFormat.singleThreadedListStatus(FileInputFormat.java:285)
at org.apache.hadoop.mapred.FileInputFormat.listStatus(FileInputFormat.java:228)
at org.apache.hadoop.mapred.FileInputFormat.getSplits(FileInputFormat.java:313)
at org.apache.spark.rdd.HadoopRDD.getPartitions(HadoopRDD.scala:201)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:205)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:203)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:203)
at org.apache.spark.rdd.MappedRDD.getPartitions(MappedRDD.scala:28)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:205)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:203)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:203)
at org.apache.spark.rdd.MappedRDD.getPartitions(MappedRDD.scala:28)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:205)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:203)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:203)
at org.apache.spark.rdd.MappedRDD.getPartitions(MappedRDD.scala:28)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:205)
at org.apache.spark.rdd.RDD$$anonfun$partitions.apply(RDD.scala:203)
at scala.Option.getOrElse(Option.scala:120)
at org.apache.spark.rdd.RDD.partitions(RDD.scala:203)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:1328)
at org.apache.spark.rdd.RDD.foreach(RDD.scala:765)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:31)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:36)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:38)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:40)
at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:42)
at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:44)
at $iwC$$iwC$$iwC$$iwC.<init>(<console>:46)
at $iwC$$iwC$$iwC.<init>(<console>:48)
at $iwC$$iwC.<init>(<console>:50)
at $iwC.<init>(<console>:52)
at <init>(<console>:54)
at .<init>(<console>:58)
at .<clinit>(<console>)
at .<init>(<console>:7)
at .<clinit>(<console>)
at $print(<console>)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:852)
at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1125)
at org.apache.spark.repl.SparkIMain.loadAndRunReq(SparkIMain.scala:674)
at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:705)
at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:669)
at org.apache.spark.repl.SparkILoop.reallyInterpret(SparkILoop.scala:828)
at org.apache.spark.repl.SparkILoop.interpretStartingWith(SparkILoop.scala:873)
at org.apache.spark.repl.SparkILoop.command(SparkILoop.scala:785)
at org.apache.spark.repl.SparkILoop.processLine(SparkILoop.scala:628)
at org.apache.spark.repl.SparkILoop.innerLoop(SparkILoop.scala:636)
at org.apache.spark.repl.SparkILoop.loop(SparkILoop.scala:641)
at org.apache.spark.repl.SparkILoop$$anonfun$process.apply$mcZ$sp(SparkILoop.scala:968)
at org.apache.spark.repl.SparkILoop$$anonfun$process.apply(SparkILoop.scala:916)
at org.apache.spark.repl.SparkILoop$$anonfun$process.apply(SparkILoop.scala:916)
at scala.tools.nsc.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:135)
at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:916)
at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:1011)
at org.apache.spark.repl.Main$.main(Main.scala:31)
at org.apache.spark.repl.Main.main(Main.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.spark.deploy.SparkSubmit$.launch(SparkSubmit.scala:358)
at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:75)
at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
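A note on the failure above: with no URI scheme, sc.textFile resolves the path against fs.defaultFS (hdfs://localhost.localdomain:8020 on this VM), so /home/cloudera/... was looked up in HDFS rather than on the local disk. A minimal sketch of reading the same file from the local filesystem instead, assuming it only needs to be visible to a local-mode driver (the file:// scheme forces local resolution):
scala> val custLocal = sc.textFile("file:///home/cloudera/tdaf/tdaf_xml_data/new_cust_20110630_cpy").map(_.split(" ")).map(p => Cust_flat_xml(p(0)))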
Added new_cust_20110630_cpy to the HDFS path hdfs://localhost.localdomain:8020/spark/sparksql/input
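(For reference, the copy can be done with the standard HDFS shell; a sketch, assuming a valid Kerberos ticket is required on this VM:)
$ kinit cloudera@HADOOP.LOCALDOMAIN    # obtain a TGT before talking to the Kerberized NameNode
$ hdfs dfs -mkdir -p /spark/sparksql/input
$ hdfs dfs -put /home/cloudera/tdaf/tdaf_xml_data/new_cust_20110630_cpy /spark/sparksql/input/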
scala> val cust = sc.textFile("/spark/sparksql/input/new_cust_20110630_cpy").map(_.split(" ")).map(p => Cust_flat_xml(p(0)))
15/07/01 11:19:06 INFO MemoryStore: ensureFreeSpace(260041) called with curMem=1124868, maxMem=280248975
15/07/01 11:19:06 INFO MemoryStore: Block broadcast_4 stored as values in memory (estimated size 253.9 KB, free 265.9 MB)
15/07/01 11:19:06 INFO MemoryStore: ensureFreeSpace(21212) called with curMem=1384909, maxMem=280248975
15/07/01 11:19:06 INFO MemoryStore: Block broadcast_4_piece0 stored as bytes in memory (estimated size 20.7 KB, free 265.9 MB)
15/07/01 11:19:06 INFO BlockManagerInfo: Added broadcast_4_piece0 in memory on 10.113.234.25:58467 (size: 20.7 KB, free: 267.2 MB)
15/07/01 11:19:06 INFO BlockManagerMaster: Updated info of block broadcast_4_piece0
15/07/01 11:19:06 INFO SparkContext: Created broadcast 4 from textFile at <console>:28
cust: org.apache.spark.rdd.RDD[Cust_flat_xml] = MappedRDD[13] at map at <console>:28
scala>
scala> cust.foreach(println)
15/07/01 11:19:47 INFO DFSClient: Created HDFS_DELEGATION_TOKEN token 43 for cloudera on 127.0.0.1:8020
15/07/01 11:19:47 INFO TokenCache: Got dt for hdfs://localhost.localdomain:8020; Kind: HDFS_DELEGATION_TOKEN, Service: 127.0.0.1:8020, Ident: (HDFS_DELEGATION_TOKEN token 43 for cloudera)
15/07/01 11:19:47 INFO FileInputFormat: Total input paths to process : 1
15/07/01 11:19:47 INFO SparkContext: Starting job: foreach at <console>:31
15/07/01 11:19:47 INFO DAGScheduler: Got job 0 (foreach at <console>:31) with 2 output partitions (allowLocal=false)
15/07/01 11:19:47 INFO DAGScheduler: Final stage: Stage 0(foreach at <console>:31)
15/07/01 11:19:47 INFO DAGScheduler: Parents of final stage: List()
15/07/01 11:19:47 INFO DAGScheduler: Missing parents: List()
15/07/01 11:19:47 INFO DAGScheduler: Submitting Stage 0 (MappedRDD[13] at map at <console>:28), which has no missing parents
15/07/01 11:19:47 INFO MemoryStore: ensureFreeSpace(3080) called with curMem=1406121, maxMem=280248975
15/07/01 11:19:47 INFO MemoryStore: Block broadcast_5 stored as values in memory (estimated size 3.0 KB, free 265.9 MB)
15/07/01 11:19:47 INFO MemoryStore: ensureFreeSpace(1800) called with curMem=1409201, maxMem=280248975
15/07/01 11:19:47 INFO MemoryStore: Block broadcast_5_piece0 stored as bytes in memory (estimated size 1800.0 B, free 265.9 MB)
15/07/01 11:19:47 INFO BlockManagerInfo: Added broadcast_5_piece0 in memory on 10.113.234.25:58467 (size: 1800.0 B, free: 267.2 MB)
15/07/01 11:19:47 INFO BlockManagerMaster: Updated info of block broadcast_5_piece0
15/07/01 11:19:47 INFO SparkContext: Created broadcast 5 from broadcast at DAGScheduler.scala:838
15/07/01 11:19:47 INFO DAGScheduler: Submitting 2 missing tasks from Stage 0 (MappedRDD[13] at map at <console>:28)
15/07/01 11:19:47 INFO TaskSchedulerImpl: Adding task set 0.0 with 2 tasks
15/07/01 11:19:47 INFO TaskSetManager: Starting task 0.0 in stage 0.0 (TID 0, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:48 INFO BlockManagerInfo: Added broadcast_5_piece0 in memory on 10.113.234.25:40605 (size: 1800.0 B, free: 267.3 MB)
15/07/01 11:19:50 INFO BlockManagerInfo: Added broadcast_4_piece0 in memory on 10.113.234.25:40605 (size: 20.7 KB, free: 267.2 MB)
15/07/01 11:19:53 INFO TaskSetManager: Starting task 1.0 in stage 0.0 (TID 1, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:53 WARN TaskSetManager: Lost task 0.0 in stage 0.0 (TID 0, 10.113.234.25): java.io.IOException: Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020;
at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:764)
at org.apache.hadoop.ipc.Client.call(Client.java:1415)
at org.apache.hadoop.ipc.Client.call(Client.java:1364)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
at com.sun.proxy.$Proxy19.getBlockLocations(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getBlockLocations(ClientNamenodeProtocolTranslatorPB.java:246)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy20.getBlockLocations(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:1179)
at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1169)
at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1159)
at org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocksAndGetLastBlockLength(DFSInputStream.java:270)
at org.apache.hadoop.hdfs.DFSInputStream.openInfo(DFSInputStream.java:237)
at org.apache.hadoop.hdfs.DFSInputStream.<init>(DFSInputStream.java:230)
at org.apache.hadoop.hdfs.DFSClient.open(DFSClient.java:1457)
at org.apache.hadoop.hdfs.DistributedFileSystem.doCall(DistributedFileSystem.java:301)
at org.apache.hadoop.hdfs.DistributedFileSystem.doCall(DistributedFileSystem.java:297)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.open(DistributedFileSystem.java:297)
at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:766)
at org.apache.hadoop.mapred.LineRecordReader.<init>(LineRecordReader.java:108)
at org.apache.hadoop.mapred.TextInputFormat.getRecordReader(TextInputFormat.java:67)
at org.apache.spark.rdd.HadoopRDD$$anon.<init>(HadoopRDD.scala:233)
at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:210)
at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:99)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
at org.apache.spark.scheduler.Task.run(Task.scala:56)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:196)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:679)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
at org.apache.hadoop.ipc.Client$Connection.handleSaslConnectionFailure(Client.java:642)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:725)
at org.apache.hadoop.ipc.Client$Connection.access00(Client.java:367)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1463)
at org.apache.hadoop.ipc.Client.call(Client.java:1382)
... 45 more
Caused by: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
at org.apache.hadoop.security.SaslRpcClient.selectSaslClient(SaslRpcClient.java:172)
at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:396)
at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:552)
at org.apache.hadoop.ipc.Client$Connection.access00(Client.java:367)
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:717)
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:713)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:712)
... 48 more
15/07/01 11:19:53 INFO TaskSetManager: Starting task 0.1 in stage 0.0 (TID 2, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:53 INFO TaskSetManager: Lost task 1.0 in stage 0.0 (TID 1) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 1]
15/07/01 11:19:53 INFO TaskSetManager: Starting task 1.1 in stage 0.0 (TID 3, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:53 INFO TaskSetManager: Lost task 0.1 in stage 0.0 (TID 2) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 2]
15/07/01 11:19:53 INFO TaskSetManager: Starting task 0.2 in stage 0.0 (TID 4, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:54 INFO TaskSetManager: Lost task 1.1 in stage 0.0 (TID 3) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 3]
15/07/01 11:19:54 INFO TaskSetManager: Starting task 1.2 in stage 0.0 (TID 5, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:54 INFO TaskSetManager: Lost task 0.2 in stage 0.0 (TID 4) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 4]
15/07/01 11:19:54 INFO TaskSetManager: Starting task 0.3 in stage 0.0 (TID 6, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:54 INFO TaskSetManager: Lost task 1.2 in stage 0.0 (TID 5) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 5]
15/07/01 11:19:54 INFO TaskSetManager: Starting task 1.3 in stage 0.0 (TID 7, 10.113.234.25, ANY, 1340 bytes)
15/07/01 11:19:54 INFO TaskSetManager: Lost task 0.3 in stage 0.0 (TID 6) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 6]
15/07/01 11:19:54 ERROR TaskSetManager: Task 0 in stage 0.0 failed 4 times; aborting job
15/07/01 11:19:54 INFO TaskSchedulerImpl: Cancelling stage 0
15/07/01 11:19:54 INFO TaskSchedulerImpl: Stage 0 was cancelled
15/07/01 11:19:54 INFO DAGScheduler: Job 0 failed: foreach at <console>:31, took 6.713733 s
15/07/01 11:19:54 INFO TaskSetManager: Lost task 1.3 in stage 0.0 (TID 7) on executor 10.113.234.25: java.io.IOException (Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020; ) [duplicate 7]
15/07/01 11:19:54 INFO TaskSchedulerImpl: Removed TaskSet 0.0, whose tasks have all completed, from pool
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 4 times, most recent failure: Lost task 0.3 in stage 0.0 (TID 6, 10.113.234.25): java.io.IOException: Failed on local exception: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]; Host Details : local host is: "localhost.localdomain/127.0.0.1"; destination host is: "localhost.localdomain":8020;
at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:764)
at org.apache.hadoop.ipc.Client.call(Client.java:1415)
at org.apache.hadoop.ipc.Client.call(Client.java:1364)
at org.apache.hadoop.ipc.ProtobufRpcEngine$Invoker.invoke(ProtobufRpcEngine.java:206)
at com.sun.proxy.$Proxy19.getBlockLocations(Unknown Source)
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.getBlockLocations(ClientNamenodeProtocolTranslatorPB.java:246)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:606)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:187)
at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:102)
at com.sun.proxy.$Proxy20.getBlockLocations(Unknown Source)
at org.apache.hadoop.hdfs.DFSClient.callGetBlockLocations(DFSClient.java:1179)
at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1169)
at org.apache.hadoop.hdfs.DFSClient.getLocatedBlocks(DFSClient.java:1159)
at org.apache.hadoop.hdfs.DFSInputStream.fetchLocatedBlocksAndGetLastBlockLength(DFSInputStream.java:270)
at org.apache.hadoop.hdfs.DFSInputStream.openInfo(DFSInputStream.java:237)
at org.apache.hadoop.hdfs.DFSInputStream.<init>(DFSInputStream.java:230)
at org.apache.hadoop.hdfs.DFSClient.open(DFSClient.java:1457)
at org.apache.hadoop.hdfs.DistributedFileSystem.doCall(DistributedFileSystem.java:301)
at org.apache.hadoop.hdfs.DistributedFileSystem.doCall(DistributedFileSystem.java:297)
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
at org.apache.hadoop.hdfs.DistributedFileSystem.open(DistributedFileSystem.java:297)
at org.apache.hadoop.fs.FileSystem.open(FileSystem.java:766)
at org.apache.hadoop.mapred.LineRecordReader.<init>(LineRecordReader.java:108)
at org.apache.hadoop.mapred.TextInputFormat.getRecordReader(TextInputFormat.java:67)
at org.apache.spark.rdd.HadoopRDD$$anon.<init>(HadoopRDD.scala:233)
at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:210)
at org.apache.spark.rdd.HadoopRDD.compute(HadoopRDD.scala:99)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:263)
at org.apache.spark.rdd.RDD.iterator(RDD.scala:230)
at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
at org.apache.spark.scheduler.Task.run(Task.scala:56)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:196)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: java.io.IOException: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:679)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
at org.apache.hadoop.ipc.Client$Connection.handleSaslConnectionFailure(Client.java:642)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:725)
at org.apache.hadoop.ipc.Client$Connection.access00(Client.java:367)
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1463)
at org.apache.hadoop.ipc.Client.call(Client.java:1382)
... 45 more
Caused by: org.apache.hadoop.security.AccessControlException: Client cannot authenticate via:[TOKEN, KERBEROS]
at org.apache.hadoop.security.SaslRpcClient.selectSaslClient(SaslRpcClient.java:172)
at org.apache.hadoop.security.SaslRpcClient.saslConnect(SaslRpcClient.java:396)
at org.apache.hadoop.ipc.Client$Connection.setupSaslConnection(Client.java:552)
at org.apache.hadoop.ipc.Client$Connection.access00(Client.java:367)
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:717)
at org.apache.hadoop.ipc.Client$Connection.run(Client.java:713)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:415)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1642)
at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:712)
... 48 more
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1214)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage.apply(DAGScheduler.scala:1203)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage.apply(DAGScheduler.scala:1202)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1202)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed.apply(DAGScheduler.scala:696)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed.apply(DAGScheduler.scala:696)
at scala.Option.foreach(Option.scala:236)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:696)
at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive.applyOrElse(DAGScheduler.scala:1420)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
at akka.actor.ActorCell.invoke(ActorCell.scala:456)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
at akka.dispatch.Mailbox.run(Mailbox.scala:219)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
scala>
You are running Spark 1.2.0 in standalone mode against a Kerberos-enabled CDH5 cluster (the Cloudera VM). Spark standalone mode does not support Kerberos authentication; only Spark on YARN can pass Kerberos credentials through to HDFS, which is why the Spark application cannot run this way.
For more details, see the following link from Cloudera:
http://www.cloudera.com/content/cloudera/en/documentation/core/latest/topics/sg_spark_auth.html
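In practice that means obtaining a ticket and launching the shell on YARN rather than standalone; a minimal sketch, using the default principal from the question:
$ kinit cloudera@HADOOP.LOCALDOMAIN    # acquire a Kerberos TGT for the cloudera user
$ klist                                # verify the ticket is in the credential cache
$ spark-shell --master yarn-client     # Spark on YARN can authenticate to Kerberized HDFS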
HTH ... please upvote if you found this useful. Thanks.