1、Code -> GitHub
https://github.com/liufengji/hadoop_hdfs.git
2、方法一
import static org.junit.Assert.*;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.NewAppWeightBooster;
import org.junit.Test;
import sun.nio.ch.Net;
// HDFS: obtain the file system (method 1: set connection parameters in code).
@Test
public void initHDFS() throws Exception {
    // 1. Create the configuration object
    Configuration configuration = new Configuration();
    // 2. Set parameters: NameNode address and desired replication factor
    configuration.set("fs.defaultFS", "hdfs://node1:9000");
    configuration.set("dfs.replication", "3");
    // 3. Obtain the file system; try-with-resources closes it
    //    (fix: the original never closed fs, leaking the client connection)
    try (FileSystem fs = FileSystem.get(configuration)) {
        // 4. Print the file system description
        System.out.println(fs.toString());
    }
}
3、方法二
(1) 首先在 src 根目錄下創建 core-site.xml，配置信息如下
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Address of the NameNode in HDFS -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://node1:9000</value>
</property>
<!-- Directory where Hadoop stores files it generates at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.8.2/data/tmp</value>
</property>
</configuration>
(2) 不用配置參數(shù)了埃疫,默認讀取core-site.xml 信息
// HDFS: obtain the file system (method 2: read defaults from core-site.xml on the classpath).
@Test
public void initHDFS() throws Exception {
    // 1. Create the configuration object; it auto-loads core-site.xml from the classpath,
    //    so no explicit set(...) calls are needed here
    Configuration configuration = new Configuration();
    // 2. Obtain the file system; try-with-resources closes it
    //    (fix: the original never closed fs, leaking the client connection)
    try (FileSystem fs = FileSystem.get(configuration)) {
        // 3. Print the file system description
        System.out.println(fs.toString());
    }
}