Operating HDFS from Java
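The listing below drives HDFS through the Hadoop FileSystem API against a remote NameNode: it creates a directory, checks for existence, deletes a path, uploads a local file, and lists the status of everything under the root. The read-through-java.net.URL variant and most of the operations are left commented out so they can be enabled one at a time.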

package com.lei.hadoop;

import java.io.FileInputStream;
import java.io.InputStream;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class Main {
    public static void main(String[] args) throws Exception {

        // Read a file through java.net.URL (requires registering the HDFS
        // URL stream handler first):
        //URL.setURLStreamHandlerFactory(new FsUrlStreamHandlerFactory());
        //URL url = new URL("hdfs://192.168.23.145:9000/count.txt");
        //InputStream inputStream = url.openStream();
        //IOUtils.copyBytes(inputStream, System.out, 4096, true);

        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.23.145:9000");
        //conf.set("dfs.replication", "1"); // optionally override the replication factor
        FileSystem fileSystem = FileSystem.get(conf);

        // Create a directory on HDFS
        //boolean result = fileSystem.mkdirs(new Path("/msb"));
        //System.out.println(result);

        // Check whether a file or directory exists
        //boolean exists = fileSystem.exists(new Path("/msb"));
        //System.out.println(exists);

        // Delete a file or directory (the boolean enables recursive deletion;
        // the single-argument delete(Path) overload is deprecated)
        //boolean delete = fileSystem.delete(new Path("/msb"), true);
        //System.out.println(delete);

        // Create a file on HDFS and upload a local archive into it
        //FSDataOutputStream out = fileSystem.create(new Path("/hadoop.tar.gz"));
        //FileInputStream file = new FileInputStream("d:/hadoop-2.7.7.tar.gz");
        //IOUtils.copyBytes(file, out, 4096, true); // true closes both streams when done
        
        // List the status of every entry under the root directory
        FileStatus[] fileStatuses = fileSystem.listStatus(new Path("/"));
        for (FileStatus fileStatus : fileStatuses) {
            System.out.println("accessTime:  " + fileStatus.getAccessTime());
            System.out.println("blockSize:   " + fileStatus.getBlockSize());
            System.out.println("length:      " + fileStatus.getLen());
            System.out.println("path:        " + fileStatus.getPath());
            System.out.println("replication: " + fileStatus.getReplication());
            System.out.println("group:       " + fileStatus.getGroup());
        }

        fileSystem.close(); // release the connection to the NameNode
    }
}
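The listing writes to HDFS but never reads a file back through the FileSystem API (the URL-based read at the top is a separate mechanism). Below is a minimal read-back sketch, assuming the /hadoop.tar.gz file uploaded above and the same NameNode address; the class name ReadFile and the local target path are made up for illustration.

package com.lei.hadoop;

import java.io.FileOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

public class ReadFile {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://192.168.23.145:9000");
        FileSystem fileSystem = FileSystem.get(conf);

        // Open the file uploaded in the listing above and copy it to a
        // local path (d:/hadoop-downloaded.tar.gz is hypothetical)
        FSDataInputStream in = fileSystem.open(new Path("/hadoop.tar.gz"));
        FileOutputStream localFile = new FileOutputStream("d:/hadoop-downloaded.tar.gz");
        IOUtils.copyBytes(in, localFile, 4096, true); // true closes both streams

        fileSystem.close();
    }
}

Because the last argument of IOUtils.copyBytes is true, both streams are closed automatically once the copy finishes, so no explicit close of in or localFile is needed.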
Original article: https://www.cnblogs.com/leigepython/p/10606221.html