Running a Hadoop program from Java fails with: org.apache.hadoop.fs.LocalFileSystem cannot be cast to org.apache.hadoop.hdfs.DistributedFileSystem

Running a Hadoop example program from Java fails with the error org.apache.hadoop.fs.LocalFileSystem cannot be cast to org.apache.hadoop.hdfs.DistributedFileSystem. The code I wrote is as follows:

package com.pcitc.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

/**
 * Get the hostnames of all DataNodes in the HDFS cluster.
 *
 * @author lenovo
 */
public class GetList {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        conf.set("dfs.default.name", "hdfs://hadoopmaster:9000");
        FileSystem fs = FileSystem.get(conf);
        DistributedFileSystem hdfs = (DistributedFileSystem) fs;
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        String[] names = new String[dataNodeStats.length];
        for (int i = 0; i < dataNodeStats.length; i++) {
            names[i] = dataNodeStats[i].getHostName();
            System.out.println("node" + i + "name" + names[i]);
        }
    }
}

Running it produces the following error:

Exception in thread "main" java.lang.ClassCastException: org.apache.hadoop.fs.LocalFileSystem cannot be cast to org.apache.hadoop.hdfs.DistributedFileSystem
    at org.apache.hadoop.examples.FindFileOnHDFS.getHDFSNodes(FindFileOnHDFS.java:43)
    at org.apache.hadoop.examples.FindFileOnHDFS.main(FindFileOnHDFS.java:16)

The cause: DistributedFileSystem and LocalFileSystem are both subclasses of FileSystem, and FileSystem.get(conf) here returns a LocalFileSystem instance, which is the default. To get a DistributedFileSystem, the conf object has to point at an HDFS filesystem. I thought my code had configured conf, but the error persisted. (In hindsight, the property name used above, dfs.default.name, is not one Hadoop reads for this; the default filesystem is controlled by fs.default.name, since deprecated in favor of fs.defaultFS, so the conf.set call had no effect and the local file:/// default was used.) Following a suggestion found online, I fixed it by passing the URI to FileSystem.get directly. The modified code is below (note the commented-out conf.set line and the new URI argument):

package com.pcitc.hadoop;

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

/**
 * Get the hostnames of all DataNodes in the HDFS cluster.
 *
 * @author lenovo
 */
public class GetList {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // conf.set("dfs.default.name", "hdfs://hadoopmaster:9000");
        String uri = "hdfs://hadoopmaster:9000";
        FileSystem fs = FileSystem.get(URI.create(uri), conf);
        DistributedFileSystem hdfs = (DistributedFileSystem) fs;
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        String[] names = new String[dataNodeStats.length];
        for (int i = 0; i < dataNodeStats.length; i++) {
            names[i] = dataNodeStats[i].getHostName();
            System.out.println("node:" + i + ",name:" + names[i]);
        }
    }
}
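
As a side note, the original conf-based style should also work once the correct property name is used, and an instanceof check turns the hard ClassCastException into a readable message. The following is a minimal sketch, not part of the original fix: the class name GetListWithConf is made up, the NameNode address hdfs://hadoopmaster:9000 is taken from the example above, and fs.defaultFS assumes a Hadoop 2.x-or-later client (older releases use the deprecated fs.default.name).

package com.pcitc.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class GetListWithConf {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // fs.defaultFS is the current property name (assumption: Hadoop 2.x
        // or later); fs.default.name is the older, deprecated spelling.
        // Either one makes FileSystem.get(conf) return a DistributedFileSystem
        // instead of the LocalFileSystem default.
        conf.set("fs.defaultFS", "hdfs://hadoopmaster:9000");
        FileSystem fs = FileSystem.get(conf);
        // Guard the cast so a misconfigured client prints a clear message
        // instead of throwing ClassCastException.
        if (!(fs instanceof DistributedFileSystem)) {
            System.err.println("Not an HDFS filesystem: " + fs.getUri());
            return;
        }
        DistributedFileSystem hdfs = (DistributedFileSystem) fs;
        DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();
        for (int i = 0; i < dataNodeStats.length; i++) {
            System.out.println("node:" + i + ",name:" + dataNodeStats[i].getHostName());
        }
    }
}

Run it the same way as the example above; if the configuration is wrong, it now prints the filesystem URI it actually got (for example file:///) instead of throwing.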