Storm Cluster Setup
1. Preparation
JDK: Part of Storm's code is written in Java, so a JDK is required; I used JDK 1.7.
Python: Storm depends on Python. If the system's built-in Python is older than 2.6 it needs to be upgraded; type python in a terminal to check the version that ships with the system.
ZooKeeper: Storm uses ZooKeeper for service coordination. For ZooKeeper installation, see: http://blog.csdn.net/mapengbo521521/article/details/41777721
2. Installing Python
Installing Python on Linux is quite simple; just follow these steps.
Extract:
tar zxvf Python-3.2.2.tgz
Install:
cd Python-3.2.2
./configure
make
make install
At this point the python command still reports the old version, so a symbolic link needs to be created:
cd /usr/bin
rm -rf python
ln -s /home/python/Python-3.2.2/python python
Run python again and you will see that it is now the new version:
python
3. Installing Storm
Download: http://mirrors.cnnic.cn/apache/storm/apache-storm-0.9.4/apache-storm-0.9.4.tar.gz
Extract:
tar -zxf apache-storm-0.9.4.tar.gz
Edit the configuration:
cd /home/hadoop/apache-storm-0.9.4/conf
vim storm.yaml
## Addresses of the ZooKeeper ensemble used by the cluster
storm.zookeeper.servers:
- "hadoop1"
- "hadoop2"
- "hadoop3"
storm.zookeeper.port: 2181
## Address of the cluster's Nimbus host
nimbus.host: "hadoop1"
## Local disk directory where the Nimbus and Supervisor processes store a small amount of state (jars, confs, etc.); create it in advance and grant sufficient access permissions
storm.local.dir: "/home/hadoop/storm-0.9.4/data"
## For each Supervisor worker node, configure how many workers that node may run. Each worker uses its own port to receive messages, and this option defines which ports workers may use. By default each node can run 4 workers, on ports 6700, 6701, 6702 and 6703.
supervisor.slots.ports:
- 6700
- 6701
- 6702
- 6703
Create the data directory:
cd /home/hadoop/storm-0.9.4/
mkdir data
Copy the configured Storm directory to the other nodes (run from /home/hadoop):
scp -rp storm-0.9.4/ root@hadoop2:/home/hadoop/
scp -rp storm-0.9.4/ root@hadoop3:/home/hadoop/
## Edit environment variables ##
[grid@hadoop4 ~]$ vim /etc/profile
export STORM_HOME=/home/hadoop/storm-0.9.4
export PATH=$PATH:$STORM_HOME/bin
source /etc/profile
Then configure the environment variables on the slave nodes in the same way.
## Start Storm (make sure ZooKeeper is already running) ##
storm nimbus & ## run the Nimbus daemon on the master node
storm supervisor & ## run the Supervisor daemon on each worker node (hadoop2 and hadoop3 here)
storm ui & ## run the UI on the master node; once it is up, open http://<master-node-ip>:<port> in a browser (port 8080 by default)
storm logviewer & ## run the LogViewer on the master node; once it is up, click a worker in the UI to view its log
[root@hadoop1 ~]$ jps
2959 QuorumPeerMain
3310 logviewer
3414 Jps
3228 nimbus
3289 core
[root@hadoop2 ~]$ jps
2907 QuorumPeerMain
3215 Jps
3154 supervisor
[root@hadoop3 ~]$ jps
3248 Jps
2935 QuorumPeerMain
3186 supervisor
The commands above do not run Storm as proper background services; to start all the Storm daemons as background services:
> bin/storm nimbus >/dev/null 2>&1 &
> bin/storm supervisor>/dev/null 2>&1 &
> bin/storm ui >/dev/null 2>&1 &
> bin/storm logviewer > /dev/null 2>&1 &
Visit: http://hadoop1:8080
4. Testing
Note: the jars under /home/hadoop/storm-0.9.4/lib need to be on the project's classpath.
1. Spout: emitting messages
package test.storm;
import java.util.Map;
import java.util.Random;
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
/**
 * The spout is the bridge to the outside world: it can pull data from a database according to some rule, or fetch tasks from a distributed queue.
 *
 * @author Administrator
 *
 */
@SuppressWarnings("serial")
public class SimpleSpout extends BaseRichSpout {
// collector used to emit tuples
private SpoutOutputCollector collector;
private static String[] info = new String[]{
"comaple\t,12424,44w46,654,12424,44w46,654,",
"lisi\t,435435,6537,12424,44w46,654,",
"lipeng\t,45735,6757,12424,44w46,654,",
"hujintao\t,45735,6757,12424,44w46,654,",
"jiangmin\t,23545,6457,2455,7576,qr44453",
"beijing\t,435435,6537,12424,44w46,654,",
"xiaoming\t,46654,8579,w3675,85877,077998,",
"xiaozhang\t,9789,788,97978,656,345235,09889,",
"ceo\t,46654,8579,w3675,85877,077998,",
"cto\t,46654,8579,w3675,85877,077998,",
"zhansan\t,46654,8579,w3675,85877,077998,"};
Random random = new Random();
/**
 * Initialize the collector
 */
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
this.collector = collector;
}
/**
 * Called repeatedly by the framework; each call can emit one piece of data (a tuple) into the Storm cluster.
 */
@Override
public void nextTuple() {
try {
String msg = info[random.nextInt(info.length)];
// emit the tuple
collector.emit(new Values(msg));
// simulate a 100 ms wait
Thread.sleep(100);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
/**
 * Declare the output field id. The id is not used in this simple case, but it matters a lot when tuples are grouped by field.
 * The declarer is also where declarer.declareStream() can be called to define a stream id, which makes it possible to build more complex stream topologies.
 */
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
declarer.declare(new Fields("source")); // must correspond to the values emitted by collector.emit(new Values(msg))
}
}
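The comment on declareOutputFields above mentions declarer.declareStream(). As a hedged illustration (not part of the original example; the stream id "errors" and the field "errMsg" are made-up names), a spout can declare an extra named stream and emit to it separately, using the same backtype.storm 0.9.x API as above:

// Minimal sketch: declaring a default stream plus an additional named stream.
public void declareOutputFields(OutputFieldsDeclarer declarer) {
    declarer.declare(new Fields("source"));                  // default stream
    declarer.declareStream("errors", new Fields("errMsg"));  // extra named stream
}
// Inside nextTuple(), a tuple can then be emitted to the named stream:
// collector.emit("errors", new Values("failed to parse record"));

A downstream bolt would subscribe to that stream with shuffleGrouping("SimpleSpout", "errors").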
2. Bolt: message processing
package test.storm;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;
/**
 * Receives the data emitted by the spout, does some simple processing, and emits it again.
 *
 * @author Administrator
 *
 */
@SuppressWarnings("serial")
public class SimpleBolt extends BaseBasicBolt {
public void execute(Tuple input, BasicOutputCollector collector) {
try {
String msg = input.getString(0);
if (msg != null) {
//System.out.println("msg=" + msg);
collector.emit(new Values(msg + "msg is processed!"));
}
} catch (Exception e) {
e.printStackTrace();
}
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
declarer.declare(new Fields("info"));
}
}
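Because config.setDebug(true) in the topology below already prints every tuple in local mode, a dedicated printing bolt is not strictly required. Still, as a hedged sketch (the class name PrintBolt and its wiring are assumptions, not part of the original test), a terminal bolt that only prints the processed messages could look like this:

package test.storm;
import backtype.storm.topology.BasicOutputCollector;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseBasicBolt;
import backtype.storm.tuple.Tuple;
// Terminal bolt: prints the processed message and emits nothing further.
public class PrintBolt extends BaseBasicBolt {
    public void execute(Tuple input, BasicOutputCollector collector) {
        System.out.println(input.getString(0));
    }
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // no output fields: this bolt ends the stream
    }
}

It would be wired in with something like topologyBuilder.setBolt("PrintBolt", new PrintBolt(), 1).shuffleGrouping("SimpleBolt"); in the topology below.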
3. Main entry point
package test.storm;
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.topology.TopologyBuilder;
/**
 * Defines a simple topology consisting of one data-emitting spout and one data-processing bolt.
 *
 * @author Administrator
 *
 */
public class SimpleTopology {
public static void main(String[] args) {
try {
// instantiate the TopologyBuilder
TopologyBuilder topologyBuilder = new TopologyBuilder();
// register the spout and set its parallelism hint, which controls the number of threads used for it in the cluster
topologyBuilder.setSpout("SimpleSpout", new SimpleSpout(), 1);
// register the processing bolt with a parallelism of 3; shuffleGrouping means it receives the spout's tuples randomly
topologyBuilder.setBolt("SimpleBolt", new SimpleBolt(), 3).shuffleGrouping("SimpleSpout");
Config config = new Config();
config.setDebug(true);
if (args != null && args.length > 0) {
config.setNumWorkers(1);
StormSubmitter.submitTopology(args[0], config, topologyBuilder.createTopology());
} else {
// startup code for running in local mode
config.setMaxTaskParallelism(1);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology("simple", config, topologyBuilder.createTopology());
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
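The local-mode branch above starts an in-process cluster but never stops it. As a minimal sketch (the 30-second run time and the backtype.storm.utils.Utils import are assumptions, not from the original), the local test could be shut down cleanly like this:

// after cluster.submitTopology("simple", config, topologyBuilder.createTopology());
Utils.sleep(30000);              // let the topology run for about 30 seconds
cluster.killTopology("simple");  // stop the "simple" topology
cluster.shutdown();              // shut down the in-process cluster

To run against the real cluster instead, compile the classes against the jars under /home/hadoop/storm-0.9.4/lib, package them into a jar, and submit it on the Nimbus node with something like bin/storm jar <your-jar> test.storm.SimpleTopology <topologyName>, where the jar name and topology name are placeholders of your choosing.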