HDFS的文件操作
格式化HDFS
命令:user@namenode:hadoop$ bin/hadoop namenode -format
啓動HDFS
命令:user@namenode:hadoop$ bin/start-dfs.sh
列出HDFS上的文件
命令:user@namenode:hadoop$ bin/hadoop dfs -ls
使用hadoop API
- <span style="">public List<String[]> GetFileBolckHost(Configuration conf, String FileName) {
- try {
- List<String[]> list = new ArrayList<String[]>();
- FileSystem hdfs = FileSystem.get(conf);
- Path path = new Path(FileName);
- FileStatus fileStatus = hdfs.getFileStatus(path);
- BlockLocation[] blkLocations = hdfs.getFileBlockLocations(
- fileStatus, 0, fileStatus.getLen());
- int blkCount = blkLocations.length;
- for (int i = 0; i < blkCount; i++) {
- String[] hosts = blkLocations[i].getHosts();
- list.add(hosts);
- }
- return list;
- } catch (IOException e) {
- e.printStackTrace();
- }
- return null;
- }</span>
在HDFS上創建目錄
命令:user@namenode:hadoop$ bin/hadoop dfs -mkdir /目錄名
使用hadoop API
- <span style="">// 在HDFS新建文件
- public FSDataOutputStream CreateFile(Configuration conf, String FileName) {
- try {
- FileSystem hdfs = FileSystem.get(conf);
- Path path = new Path(FileName);
- FSDataOutputStream outputStream = hdfs.create(path);
- return outputStream;
- } catch (IOException e) {
- e.printStackTrace();
- }
- return null;
- }</span>
上傳一個文件到HDFS
命令:user@namenode:hadoop$ bin/hadoop dfs -put 文件名 /user/yourUserName/
使用hadoop API
- <span style="">// 上傳文件到HDFS
- public void PutFile(Configuration conf, String srcFile, String dstFile) {
- try {
- FileSystem hdfs = FileSystem.get(conf);
- Path srcPath = new Path(srcFile);
- Path dstPath = new Path(dstFile);
- hdfs.copyFromLocalFile(srcPath, dstPath);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }</span>
從 HDFS 中導出數據
命令:user@namenode:hadoop$ bin/hadoop dfs -cat foo
使用hadoop API
- <span style="">// 從HDFS讀取文件
- public void ReadFile(Configuration conf, String FileName) {
- try {
- FileSystem hdfs = FileSystem.get(conf);
- FSDataInputStream dis = hdfs.open(new Path(FileName));
- IOUtils.copyBytes(dis, System.out, 4096, false);
- dis.close();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }</span>
HDFS 的關閉
命令:user@namenode:hadoop$ bin/stop-dfs.sh
HDFS全局狀態信息
命令:bin/hadoop dfsadmin -report
我們可以得到一份全局狀態報告。這份報告包含了HDFS集羣的基本信息,當然也有每臺機器的一些情況。
以上講的都是本地操作HDFS,都是基於在ubuntu下並配置有hadoop環境下對HDFS的操作,作爲客戶端也可以在window系統下遠程的對 HDFS進行操作,其實原理基本上差不多,只需要集羣中namenode對外開放的IP和端口,就可以訪問到HDFS
- <span style="">/**
- * 對HDFS操作
- * @author yujing
- *
- */
- public class Write {
- public static void main(String[] args) {
- try {
- uploadTohdfs();
- readHdfs();
- getDirectoryFromHdfs();
- } catch (FileNotFoundException e) {
- e.printStackTrace();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
- public static void uploadTohdfs() throws FileNotFoundException, IOException {
- String localSrc = "D://qq.txt";
- String dst = "hdfs://192.168.1.11:9000/usr/yujing/test.txt";
- InputStream in = new BufferedInputStream(new FileInputStream(localSrc));
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(URI.create(dst), conf);
- OutputStream out = fs.create(new Path(dst), new Progressable() {
- public void progress() {
- System.out.println(".");
- }
- });
- System.out.println("上傳文件成功");
- IOUtils.copyBytes(in, out, 4096, true);
- }
- /** 從HDFS上讀取文件 */
- private static void readHdfs() throws FileNotFoundException, IOException {
- String dst = "hdfs://192.168.1.11:9000/usr/yujing/test.txt";
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(URI.create(dst), conf);
- FSDataInputStream hdfsInStream = fs.open(new Path(dst));
- OutputStream out = new FileOutputStream("d:/qq-hdfs.txt");
- byte[] ioBuffer = new byte[1024];
- int readLen = hdfsInStream.read(ioBuffer);
- while (-1 != readLen) {
- out.write(ioBuffer, 0, readLen);
- readLen = hdfsInStream.read(ioBuffer);
- }
- System.out.println("讀文件成功");
- out.close();
- hdfsInStream.close();
- fs.close();
- }
- /**
- * 以append方式將內容添加到HDFS上文件的末尾;注意:文件更新,需要在hdfs-site.xml中添<property><name>dfs.
- * append.support</name><value>true</value></property>
- */
- private static void appendToHdfs() throws FileNotFoundException,
- IOException {
- String dst = "hdfs://192.168.1.11:9000/usr/yujing/test.txt";
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(URI.create(dst), conf);
- FSDataOutputStream out = fs.append(new Path(dst));
- int readLen = "zhangzk add by hdfs java api".getBytes().length;
- while (-1 != readLen) {
- out.write("zhangzk add by hdfs java api".getBytes(), 0, readLen);
- }
- out.close();
- fs.close();
- }
- /** 從HDFS上刪除文件 */
- private static void deleteFromHdfs() throws FileNotFoundException,
- IOException {
- String dst = "hdfs://192.168.1.11:9000/usr/yujing";
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(URI.create(dst), conf);
- fs.deleteOnExit(new Path(dst));
- fs.close();
- }
- /** 遍歷HDFS上的文件和目錄 */
- private static void getDirectoryFromHdfs() throws FileNotFoundException,
- IOException {
- String dst = "hdfs://192.168.1.11:9000/usr/yujing";
- Configuration conf = new Configuration();
- FileSystem fs = FileSystem.get(URI.create(dst), conf);
- FileStatus fileList[] = fs.listStatus(new Path(dst));
- int size = fileList.length;
- for (int i = 0; i < size; i++) {
- System.out.println("文件名name:" + fileList[i].getPath().getName()
- + "文件大小/t/tsize:" + fileList[i].getLen());
- }
- fs.close();
- }
- }
- </span>
我們可以通過http://主機IP:50070(NameNode的Web界面;50030是JobTracker的端口)查看HDFS集羣的狀態信息,也可以瀏覽到自己上傳到HDFS上的文件