HDFS Java client example (upload, download, mkdir, delete, rename, list)

package hadoop;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class HdfsClient {

	/**
	 * Demonstrates basic HDFS operations through the Hadoop {@code FileSystem} API:
	 * download, directory creation, recursive delete, rename, recursive file listing
	 * with block locations, and a shallow directory listing.
	 *
	 * <p>Connects to the NameNode at {@code hdfs://192.168.1.231:9000} as user
	 * {@code root} — adjust for your cluster.
	 *
	 * @param args unused
	 * @throws IOException          if an HDFS operation fails
	 * @throws InterruptedException if the FileSystem connection is interrupted
	 * @throws URISyntaxException   if the NameNode URI is malformed
	 */
	public static void main(String[] args) throws IOException, InterruptedException, URISyntaxException {
		Configuration conf = new Configuration();
		// try-with-resources guarantees the FileSystem is closed even if an
		// operation throws (the original only closed it on the success path).
		try (FileSystem fs = FileSystem.get(new URI("hdfs://192.168.1.231:9000"), conf, "root")) {

			// Upload a local file to the HDFS root
			//fs.copyFromLocalFile(new Path("F:/新建文本文档.txt"), new Path("/"));

			// Download an HDFS file to the local file system
			fs.copyToLocalFile(new Path("/新建文本文档.txt"), new Path("F:/aaa"));

			// Create nested directories (like mkdir -p)
			fs.mkdirs(new Path("/a1/b1/c1"));

			// Delete a directory; the second argument must be true (recursive)
			// when the directory is non-empty
			fs.delete(new Path("/aaa"), true);

			// Rename a file or directory
			fs.rename(new Path("/a1"), new Path("/a2"));

			// Recursively list every file under "/" with size, permission,
			// and the data-node locations of each block
			RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path("/"), true);
			while (listFiles.hasNext()) {
				LocatedFileStatus fileStatus = listFiles.next();

				System.out.println(fileStatus.getPath().getName());
				System.out.println(fileStatus.getBlockSize());
				System.out.println(fileStatus.getPermission());
				System.out.println(fileStatus.getLen());
				for (BlockLocation bl : fileStatus.getBlockLocations()) {
					System.out.println("block-length:" + bl.getLength() + "--" + "block-offset:" + bl.getOffset());
					for (String host : bl.getHosts()) {
						System.out.println(host);
					}
				}
				System.out.println("--------------分割线--------------");
			}

			// List the direct children of "/" and tag each entry as a file (f--)
			// or directory (d--).
			// BUG FIX: the flag was declared once outside the loop and never reset,
			// so every directory listed after the first file was mislabeled "f--".
			// Compute it per entry instead.
			FileStatus[] listStatus = fs.listStatus(new Path("/"));
			for (FileStatus fstatus : listStatus) {
				String flag = fstatus.isFile() ? "f--         " : "d--             ";
				System.out.println(flag + fstatus.getPath().getName());
			}
		}
	}
}

  

Maven dependency:

		<dependency>
			<groupId>org.apache.hadoop</groupId>
			<artifactId>hadoop-client</artifactId>
			<version>2.8.5</version>
		</dependency>

  

原文地址:https://www.cnblogs.com/cxygg/p/9724846.html