Implementing WordCountTopology

1. Flow diagram (original image not preserved): tuples flow SentenceSpout → SplitBolt → CountBolt → PrintBolt.

2. Writing SentenceSpout

package com.ibeifeng.bigdata.storm.topo;

import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichSpout;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Map;
import java.util.Random;

/**
 * Spout implementation
 * Created by ad on 2016/12/11.
 */
//public class SentenceSpout implements IRichSpout{
public class SentenceSpout extends BaseRichSpout{

    private static final Logger logger = LoggerFactory.getLogger(SentenceSpout.class);

    /**
     * Tuple emitter for this spout
     */
    private SpoutOutputCollector collector;

    private static final String[] SENTENCES = {
            "hadoop yarn mapreduce spark",
            "flume hadoop hive spark",
            "oozie yarn spark storm",
            "storm yarn mapreduce error",
            "error flume storm spark"
    };

    /**
     * Declares, in order, the field (key) names of the tuples
     * this component emits to downstream components
     * @param declarer
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("sentence"));
    }

    /**
     * Used to specify configuration that applies only to this component
     * @return
     */
    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }

    /**
     * Initialization method of the spout component.
     * Called only once, when the SentenceSpout instance is created.
     * @param conf
     * @param context
     * @param collector
     */
    @Override
    public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {

        // Keep a reference to the tuple emitter in an instance variable
        this.collector = collector;
    }

    /**
     * close() runs before this spout shuts down, but its execution is not
     * guaranteed. The spout runs as a task inside a worker; in cluster mode
     * the supervisor kills the worker process with kill -9, in which case
     * close() never runs. In local mode, as long as the process is stopped
     * with a stop command rather than kill -9, close() is guaranteed to run.
     */
    @Override
    public void close() {
        // Cleanup work goes here
    }

    /**
     * Called when the spout is (re)activated
     */
    @Override
    public void activate() {

    }

    /**
     * Called when the spout is temporarily deactivated
     */
    @Override
    public void deactivate() {

    }

    /**
     * Core method of the spout component; called in a loop.
     * 1) The logic for fetching data from the data source goes here
     * 2) Optionally apply some simple processing to the data
     * 3) Wrap the data in a tuple and emit it to the downstream bolts
     *    (only the tuple's values are specified here; the field names
     *    come from declareOutputFields)
     */
    @Override
    public void nextTuple() {
        // Pick one random sentence from the array (simulates reading from a data source)
        String sentence = SENTENCES[new Random().nextInt(SENTENCES.length)];

        if(sentence.contains("error")){
            logger.error("記錄有問題:" + sentence);
        }else{
            // 封裝成tuple
            this.collector.emit(new Values(sentence));
        }

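        // Slow emission down for the demo: pause 10 seconds between tuples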
        try {
            Thread.sleep(10000);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }

    }

    /**
     * The Object passed in is a message id that uniquely identifies a tuple.
     * This method is called after the tuple with that id has been
     * processed successfully
     * @param msgId
     */
    @Override
    public void ack(Object msgId) {

    }

    /**
     * Same as ack, but called when processing of the tuple fails
     * @param msgId
     */
    @Override
    public void fail(Object msgId) {

    }
}
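
The spout above emits tuples without a message id, so Storm does not track them and the empty ack()/fail() callbacks never fire. For reference, here is a minimal sketch of what a reliable variant could look like (the pending map, UUID message ids, and the retry-on-fail policy are illustrative assumptions, not part of the original code):

    // Hypothetical reliable variant (not in the original post):
    private final Map<String, String> pending = new java.util.concurrent.ConcurrentHashMap<>();

    @Override
    public void nextTuple() {
        String sentence = SENTENCES[new Random().nextInt(SENTENCES.length)];
        String msgId = java.util.UUID.randomUUID().toString(); // unique id so Storm tracks this tuple
        pending.put(msgId, sentence);                          // remember it until it is acked
        this.collector.emit(new Values(sentence), msgId);      // emitting with a message id enables ack/fail
    }

    @Override
    public void ack(Object msgId) {
        pending.remove(msgId);                                 // fully processed: forget it
    }

    @Override
    public void fail(Object msgId) {
        // simple retry policy: re-emit the failed sentence with the same id
        this.collector.emit(new Values(pending.get(msgId)), msgId);
    }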
3. Writing SplitBolt

package com.ibeifeng.bigdata.storm.topo;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.IRichBolt;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

import java.util.Map;

/**
 * Bolt implementation
 * Created by ad on 2016/12/11.
 */
public class SplitBolt implements IRichBolt{

    /**
     * Tuple emitter for this bolt
     */
    private OutputCollector collector;

    /**
     * Initialization method of the bolt component
     *
     * @param stormConf
     * @param context
     * @param collector
     */
    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {

        this.collector = collector;
    }

    /**
     * Called once for every tuple received from the upstream component.
     *
     * The bolt's data-processing logic goes in this method.
     * The processed data is wrapped into a tuple (the value part) and
     * emitted on to the downstream components, or the bolt performs
     * terminal work such as writing to a database or printing to a file
     *
     * @param input
     */
    @Override
    public void execute(Tuple input) {
        String sentence = input.getStringByField("sentence");

        if(sentence != null && !"".equals(sentence)){
            String[] words = sentence.split(" ");
            for (String word: words){
                this.collector.emit(new Values(word));
            }
        }
    }

    /**
     * cleanup() is called when the bolt is being shut down and should
     * release all open resources. The cluster does not guarantee that it
     * runs: if the machine executing the task goes down, there is no way
     * to invoke it. cleanup() was designed to be called in local mode
     * (i.e. when the whole Storm cluster is simulated in a single process),
     * so that resources are not leaked when a topology is killed
     */
    @Override
    public void cleanup() {

    }

    /**
     * declareOutputFields declares a single output field named "word".
     * It fixes the number of fields this bolt/spout emits for downstream
     * consumers; the number of values emitted in this bolt's execute
     * method must match the declared count, otherwise Storm throws:
     * Tuple created with wrong number of fields. Expected 2 fields but got 1 fields
     * @param declarer
     */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word"));
    }

    @Override
    public Map<String, Object> getComponentConfiguration() {
        return null;
    }
}
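
Because SplitBolt implements IRichBolt directly, nothing acks the incoming tuple; that is harmless here since the spout emits untracked tuples. If the spout were made reliable as sketched above, execute() would also need to anchor its output and ack its input, roughly like this hypothetical variant (not the original code):

    @Override
    public void execute(Tuple input) {
        String sentence = input.getStringByField("sentence");
        if(sentence != null && !"".equals(sentence)){
            for (String word : sentence.split(" ")) {
                // anchor each output tuple to the input so a downstream failure
                // propagates back to the spout's fail() callback
                this.collector.emit(input, new Values(word));
            }
        }
        this.collector.ack(input); // mark the input tuple as fully processed
    }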
4. Writing CountBolt

package com.ibeifeng.bigdata.storm.topo;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

import java.util.HashMap;
import java.util.Map;

/**
 * Word counting
 * Created by ad on 2016/12/11.
 */
public class CountBolt extends BaseRichBolt {

    private Map<String,Integer> counts;
    /**
     * Tuple emitter for this bolt
     */
    private OutputCollector collector;

    /**
     * Initialization method of the bolt component
     *
     * @param stormConf
     * @param context
     * @param collector
     */
    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {

        this.collector = collector;
        this.counts = new HashMap<>();
    }


    @Override
    public void execute(Tuple input) {

        String word = input.getStringByField("word");

        // Accumulate the count for this word
        int count = 1;
        if(counts.containsKey(word)){
            count = counts.get(word) + 1;
        }

        counts.put(word, count);

        this.collector.emit(new Values(word, count));
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        declarer.declare(new Fields("word","count"));
    }
}
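
CountBolt holds its counts in an in-memory HashMap per task; the fieldsGrouping on "word" in the topology below guarantees that all occurrences of a given word reach the same task, so the per-task totals are correct. If emitting an updated count for every single tuple is too chatty, a common Storm idiom is a tick tuple that makes the bolt flush periodically. A minimal sketch, assuming imports of backtype.storm.Config and backtype.storm.Constants (the 10-second interval is an arbitrary choice, not from the original post):

    // Hypothetical tick-tuple variant: flush all counts every 10 seconds instead of per tuple.
    @Override
    public Map<String, Object> getComponentConfiguration() {
        Map<String, Object> conf = new HashMap<>();
        conf.put(Config.TOPOLOGY_TICK_TUPLE_FREQ_SECS, 10); // ask Storm for a tick every 10 s
        return conf;
    }

    @Override
    public void execute(Tuple input) {
        if (Constants.SYSTEM_COMPONENT_ID.equals(input.getSourceComponent())
                && Constants.SYSTEM_TICK_STREAM_ID.equals(input.getSourceStreamId())) {
            // tick tuple: emit the current total for every word
            for (Map.Entry<String, Integer> entry : counts.entrySet()) {
                this.collector.emit(new Values(entry.getKey(), entry.getValue()));
            }
            return;
        }
        String word = input.getStringByField("word");
        counts.put(word, counts.containsKey(word) ? counts.get(word) + 1 : 1);
    }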
5. Writing PrintBolt

package com.ibeifeng.bigdata.storm.topo;

import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

import java.util.Map;

/**
 * Created by ad on 2016/12/11.
 */
public class PrintBolt extends BaseRichBolt{
    @Override
    public void prepare(Map stormConf, TopologyContext context, OutputCollector collector) {

    }

    @Override
    public void execute(Tuple input) {
        String word = input.getStringByField("word");
        Integer count = input.getIntegerByField("count");

        System.err.println("單詞:" + word + ", ----> 累計出現次數:"+ count);
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {

    }
}

6. Writing the driver program WordCountTopology

package com.ibeifeng.bigdata.storm.topo;

import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

/**
 * WordCountTopology driver
 * Created by ad on 2016/12/11.
 */
public class WordCountTopology {

    private static final String SPOUT_ID = "sentenceSpout";
    private static final String SPLIT_BOLT = "splitBolt";
    private static final String COUNT_BOLT = "countBolt";
    private static final String PRINT_BOLT = "printBolt";

    public static void main(String[] args) {
        // Build the topology

        TopologyBuilder builder = new TopologyBuilder();

        builder.setSpout(SPOUT_ID, new SentenceSpout()); // register the spout

        // SentenceSpout feeds SplitBolt; shuffle grouping distributes tuples randomly
        builder.setBolt(SPLIT_BOLT, new SplitBolt()) //.localOrShuffleGrouping(SPOUT_ID)
                .shuffleGrouping(SPOUT_ID);

        builder.setBolt(COUNT_BOLT, new CountBolt()).fieldsGrouping(SPLIT_BOLT, new Fields("word"));

        builder.setBolt(PRINT_BOLT, new PrintBolt())
                .globalGrouping(COUNT_BOLT); // global grouping: all tuples go to a single PrintBolt task


        Config conf = new Config();

        if(args == null || args.length == 0){
            // Run in local mode
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("wordcount", conf, builder.createTopology());
        }else{
            // Submit to the cluster
            conf.setNumWorkers(1); // number of worker processes used to run this topology
            try {
                StormSubmitter.submitTopology(args[0], conf, builder.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            }
        }
    }
}
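
As written, the local-mode branch submits the topology and never stops it, so the process has to be killed by hand. For demos, a common pattern is to let the topology run for a fixed time and then shut the local cluster down; a sketch of what that branch could look like (the 60-second run time is arbitrary, and Utils is backtype.storm.utils.Utils):

            // Hypothetical self-stopping local-mode branch:
            LocalCluster localCluster = new LocalCluster();
            localCluster.submitTopology("wordcount", conf, builder.createTopology());
            Utils.sleep(60 * 1000);                  // let the topology run for 60 s
            localCluster.killTopology("wordcount");  // stop the topology
            localCluster.shutdown();                 // tear down the in-process cluster

On a real cluster, the packaged jar is typically submitted with the storm jar command, passing the topology name as args[0] so that the StormSubmitter branch runs.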