Writing data to HDFS from C with libhdfs, and configuring the block size

Start from the hdfs_write example that ships with libhdfs and change the NameNode IP and port to your own:
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hdfs.h"

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {

    if (argc != 4) {
        fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
        exit(-1);
    }
    
    // replace with your own NameNode IP/hostname and RPC port
    hdfsFS fs = hdfsConnect("192.168.1.1", 8020);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    } 
 
    const char* writeFileName = argv[1];
    tSize fileTotalSize = strtoul(argv[2], NULL, 10);   // note: tSize is 32-bit, so <filesize> is limited to ~2 GB
    tSize bufferSize = strtoul(argv[3], NULL, 10);
   
    // replication and blockSize of 0 mean "use the values from the configuration"
    hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
    if (!writeFile) {
        fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
        exit(-2);
    }

    // data to be written to the file
    char* buffer = malloc(sizeof(char) * bufferSize);
    if(buffer == NULL) {
        return -2;
    }
    int i = 0;
    for (i=0; i < bufferSize; ++i) {
        buffer[i] = 'a' + (i%26);
    }
    
    // write to the file, bufferSize bytes at a time, checking each write
    tSize nrRemaining;
    for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
        tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : nrRemaining;
        tSize written = hdfsWrite(fs, writeFile, (void*)buffer, curSize);
        if (written != curSize) {
            fprintf(stderr, "hdfsWrite wrote %d bytes, expected %d!\n",
                    (int)written, (int)curSize);
            break;
        }
    }

    free(buffer);
    hdfsCloseFile(fs, writeFile);
    hdfsDisconnect(fs);

    return 0;
}

/**
 * vim: ts=4: sw=4: et:
 */
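
After the write completes, one way to confirm which block size actually took effect is hdfsGetPathInfo, which reports the block size of a path in its hdfsFileInfo result. This small checker is an addition to the original post (the hdfs_stat name is made up; the NameNode address mirrors the example above):

#include "hdfs.h"

#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv) {

    if (argc != 2) {
        fprintf(stderr, "Usage: hdfs_stat <path>\n");
        exit(-1);
    }

    // replace with your own NameNode IP/hostname and RPC port
    hdfsFS fs = hdfsConnect("192.168.1.1", 8020);
    if (!fs) {
        fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
        exit(-1);
    }

    hdfsFileInfo* info = hdfsGetPathInfo(fs, argv[1]);
    if (!info) {
        fprintf(stderr, "hdfsGetPathInfo failed for %s!\n", argv[1]);
        hdfsDisconnect(fs);
        exit(-2);
    }

    // mBlockSize is the per-file block size recorded by the NameNode
    printf("%s: size=%lld bytes, blockSize=%lld bytes, replication=%d\n",
           info->mName, (long long)info->mSize, (long long)info->mBlockSize,
           (int)info->mReplication);

    hdfsFreeFileInfo(info, 1);
    hdfsDisconnect(fs);
    return 0;
}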

Set HADOOP_HOME, JAVA_HOME, HADOOP_CONF_DIR and CLASSPATH:
HADOOP_HOME=/usr/home/hadoop
JAVA_HOME=/path/to/your/jdk      # placeholder: point this at your JDK installation
export HADOOP_HOME JAVA_HOME

for i in $HADOOP_HOME/*.jar ; do
    CLASSPATH=$CLASSPATH:$i
done

for i in $HADOOP_HOME/lib/*.jar ; do
    CLASSPATH=$CLASSPATH:$i
done

HADOOP_CONF_DIR=$HADOOP_HOME/conf
CLASSPATH=$CLASSPATH:$HADOOP_CONF_DIR
export  HADOOP_CONF_DIR CLASSPATH



Note that CLASSPATH must include $HADOOP_CONF_DIR. The block size and other properties can be configured through the files in HADOOP_CONF_DIR (e.g. dfs.block.size in hdfs-site.xml). For the details, see hdfs.c under $HADOOP_HOME/src/c++/libhdfs:
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize);

and the getJNIEnv() function in hdfsJniHelper.c, which reads the CLASSPATH environment variable when it creates the embedded JVM (this is why the configuration directory has to be on the CLASSPATH):

JNIEnv* getJNIEnv(void);
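
If you would rather fix the block size in code instead of relying on the configuration files, pass a non-zero value as the last argument of hdfsOpenFile; 0 falls back to the configured default. The following is a minimal sketch added here for illustration (the 64 MB value, the /tmp/blocksize_test path and the NameNode address are placeholders, not from the original example):

#include "hdfs.h"

#include <stdio.h>
#include <string.h>

int main(void) {
    // replace with your own NameNode IP/hostname and RPC port
    hdfsFS fs = hdfsConnect("192.168.1.1", 8020);
    if (!fs) {
        fprintf(stderr, "Failed to connect to hdfs!\n");
        return -1;
    }

    // an explicit, non-zero blockSize overrides dfs.block.size from the configuration;
    // passing 0 (as in hdfs_write above) keeps the configured default
    tSize blockSize = 64 * 1024 * 1024;   // 64 MB, an arbitrary example value
    hdfsFile f = hdfsOpenFile(fs, "/tmp/blocksize_test", O_WRONLY, 0, 0, blockSize);
    if (!f) {
        fprintf(stderr, "Failed to open /tmp/blocksize_test for writing!\n");
        hdfsDisconnect(fs);
        return -2;
    }

    const char* msg = "hello hdfs\n";
    hdfsWrite(fs, f, (void*)msg, (tSize)strlen(msg));

    hdfsCloseFile(fs, f);
    hdfsDisconnect(fs);
    return 0;
}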


Compile:
gcc  hdfs_write.c -I${HADOOP_HOME}/src/c++/libhdfs -I${JAVA_HOME}/include -I${JAVA_HOME}/include/linux -L${HADOOP_HOME}/libhdfs -lhdfs -o hdfs_write

Run:

hdfs_write <filename> <filesize> <buffersize>
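
For example (path and sizes are only an illustration), the following writes a 100 MB file in 4 KB chunks:

./hdfs_write /user/hadoop/libhdfs_test.dat 104857600 4096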

