Adding PaodingAnalyzer 2.0 to Lucene 2.9, with Stopword Support

I recently started learning Lucene (the Java version). It is fairly quick to pick up, and there is plenty of learning material online; this post is my summary.

1. Set up the Lucene development environment: add lucene-core-2.9.1.jar to the classpath (the jar can be downloaded from CSDN).

2. Add the Paoding (庖丁解牛) jars

I am using Paoding 2.0.0.

Download: http://code.google.com/p/paoding/downloads/list
SVN: http://paoding.googlecode.com/svn/trunk/paoding-analysis/

The package contains lucene-highlighter-2.2.0.jar, paoding-analysis.jar, and commons-logging.jar; add these to the classpath as well.

3. Add Paoding to Lucene: the configuration files

---------------------------------------------------------------

paoding-dic-home.properties 

 

#values are "system-env" or "this";
#if value is "this", use paoding.dic.home as dicHome if configured
#paoding.dic.home.config-fisrt=system-env
paoding.dic.home.config-fisrt=this

#dictionary home (directory)
#"classpath:xxx" means the dictionary home is on the classpath.
#e.g. "classpath:dic" means dictionaries are in the "classes/dic" directory or any other classpath directory

#the downloaded Paoding package ships with this dictionary directory; add it to the project root
paoding.dic.home=/paoding/dic

#seconds for dic modification detection
paoding.dic.detector.interval=60

---------------------------------------------------------------

paoding-knives.properties configuration:

paoding.knife.class.letterKnife=net.paoding.analysis.knife.LetterKnife
paoding.knife.class.numberKnife=net.paoding.analysis.knife.NumberKnife
paoding.knife.class.cjkKnife=net.paoding.analysis.knife.CJKKnife

---------------------------------------------------------------
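Before wiring Paoding into an index, it is worth a quick smoke test that the dictionaries are found and tokenization works. Below is a minimal sketch of my own (the class name PaodingSmokeTest and the sample sentence are just for illustration); it uses the pre-3.0 TokenStream API that Lucene 2.9 still supports:

import java.io.StringReader;

import net.paoding.analysis.analyzer.PaodingAnalyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

public class PaodingSmokeTest {
    public static void main(String[] args) throws Exception {
        // If paoding.dic.home resolves correctly, this prints the Chinese tokens
        Analyzer analyzer = new PaodingAnalyzer();
        TokenStream ts = analyzer.tokenStream("content", new StringReader("中文分词测试"));
        Token token = new Token();
        while ((token = ts.next(token)) != null) {
            System.out.println(token.term());
        }
    }
}

If the dictionary directory cannot be located, Paoding fails when the analyzer is constructed, so a wrong paoding.dic.home shows up immediately.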

 

 

Here is the full code:

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;

import net.paoding.analysis.examples.gettingstarted.BoldFormatter;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.TermPositionVector;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Searcher;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.TokenSources;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LuceneChinese {

    // Directory containing the data files to index
    private static final String DATA_DIR = "C:/unicode";
    // Directory where the index will be stored
    private static final String INDEX_DIR = "C:/dir";
    // Name of the field holding the document body
    private static final String FIELD_NAME = "content";

    public static void main(String[] args) throws Exception {
        createIndex();
        search("");
    }
    /**
     * Create the index.
     */
    public static void createIndex() {
        System.out.println("------------------- Indexing started -----------------------");
        long timeStart = System.currentTimeMillis();
        try {
            // PaodingChineseAnalyzer extends PaodingAnalyzer and overrides
            // tokenStream to add stopword filtering
            Analyzer analyzer = new PaodingChineseAnalyzer(new File("E:/stopwords.txt"));
            IndexWriter writer = new IndexWriter(FSDirectory.open(new File(INDEX_DIR)),
                    analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
            // Index every txt file under the data directory
            indexDoc(writer, new File(DATA_DIR));
            // Optimize the index to speed up searches
            writer.optimize();
            writer.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
        long timeEnd = System.currentTimeMillis();
        System.out.println("------------------- Indexing took: " + (timeEnd - timeStart) + " ms -----------------------");
    }
    /**
     * Search the index.
     * @param queryString the query; if empty, the user is prompted for one
     * @throws IOException
     * @throws ParseException
     */
    public static void search(String queryString)
            throws IOException, ParseException {

        // Prompt for a search keyword if none was supplied
        if (queryString == null || queryString.length() == 0) {
            System.out.print("Search for:");
            InputStreamReader in = new InputStreamReader(System.in);
            BufferedReader reader = new BufferedReader(in);
            queryString = reader.readLine();
            if (queryString == null || queryString.length() == 0) {
                System.exit(0);
            }
        }

        long timeStart = System.currentTimeMillis();

        // Open the index directory
        Directory directory = FSDirectory.open(new File(INDEX_DIR));
        // PaodingChineseAnalyzer extends PaodingAnalyzer
        Analyzer analyzer = new PaodingChineseAnalyzer();
        IndexReader reader = IndexReader.open(directory, true);
        QueryParser parser = new QueryParser(FIELD_NAME, analyzer);
        Query query = parser.parse(queryString);
        // Create the searcher
        Searcher searcher = new IndexSearcher(directory);
        query = query.rewrite(reader);
        Hits hits = searcher.search(query);

        // Highlighting tags; the default is <b></b>
        // SimpleHTMLFormatter shf = new SimpleHTMLFormatter("<span style=\"color:red\">", "</span>");
        BoldFormatter formatter = new BoldFormatter();
        // Build the highlighter from a formatter and a query scorer
        Highlighter highlighter = new Highlighter(formatter, new QueryScorer(query));
        // Split the text into 50-character fragments
        highlighter.setTextFragmenter(new SimpleFragmenter(50));

        System.out.println("Found " + hits.length() + " matching documents");
        System.out.println("---------------------------------------------");
        for (int i = 0; i < hits.length(); i++) {
            String text = hits.doc(i).get(FIELD_NAME);
            String path = hits.doc(i).get("path");
            int maxNumFragmentsRequired = 5;
            String fragmentSeparator = "...";
            TermPositionVector tpv = (TermPositionVector) reader
                    .getTermFreqVector(hits.id(i), FIELD_NAME);
            TokenStream tokenStream = TokenSources.getTokenStream(tpv);
            String result = highlighter.getBestFragments(tokenStream, text,
                    maxNumFragmentsRequired, fragmentSeparator);
            System.out.println("\nFile path: " + path);
            System.out.println("\n" + result);
        }
        reader.close();
        long timeEnd = System.currentTimeMillis();
        System.out.println("------------------- Search took: " + (timeEnd - timeStart) + " ms -----------------------");
    }
    /**
     * Recursively index every file under the given directory.
     *
     * @param writer the IndexWriter
     * @param root the directory (or file) to index
     */
    private static void indexDoc(IndexWriter writer, File root) {
        // Skip anything we cannot read
        if (root.canRead()) {
            if (root.isDirectory()) {
                File[] files = root.listFiles();
                if (files != null) {
                    for (int i = 0; i < files.length; i++) {
                        // Recurse into subdirectories
                        indexDoc(writer, files[i]);
                    }
                }
            } else {
                try {
                    // Read the file's text content (the files are assumed to be GBK-encoded)
                    InputStream in = new FileInputStream(root);
                    byte[] b = new byte[(int) root.length()];
                    int off = 0;
                    int len;
                    while (off < b.length && (len = in.read(b, off, b.length - off)) > 0) {
                        off += len;
                    }
                    in.close();
                    String content = new String(b, 0, off, "GBK");
                    // Create a Lucene document
                    Document d = new Document();
                    // Add the text content: stored, analyzed, with term vectors for highlighting
                    d.add(new Field(FIELD_NAME, content, Field.Store.YES,
                            Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS));
                    // Store the file path as well, without analyzing it
                    d.add(new Field("path", root.getAbsolutePath(),
                            Field.Store.YES, Field.Index.NOT_ANALYZED));
                    // Write the document to the index
                    writer.addDocument(d);
                    System.out.println("add file: " + root.getAbsolutePath());
                } catch (FileNotFoundException e) {
                    System.out.println("file not found, ignored.");
                    e.printStackTrace();
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
    }

}
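A note on the stopwords file used above: WordlistLoader.getWordSet reads plain text with one word per line, so E:/stopwords.txt (that path is just my example location) would simply look like:

的
和
是
这
www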

 

The PaodingChineseAnalyzer code:
import java.io.File;
import java.io.Reader;
import java.util.Set;

import net.paoding.analysis.analyzer.PaodingAnalyzer;
import net.paoding.analysis.analyzer.PaodingTokenizer;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.WordlistLoader;

public class PaodingChineseAnalyzer extends PaodingAnalyzer {

    // Default stopword list: common Chinese function words plus "www"
    private static String[] stopWords = {
            "www", "的", "和", "与", "时", "在",
            "是", "被", "所", "那", "这", "有",
            "将", "会", "为", "对", "了", "过",
            "去" };

    private Set stopSet;

    public PaodingChineseAnalyzer() {
        stopSet = StopFilter.makeStopSet(stopWords);
    }

    public PaodingChineseAnalyzer(String[] stopWords) {
        stopSet = StopFilter.makeStopSet(stopWords);
    }

    // Load the stopwords from an external file (one word per line)
    public PaodingChineseAnalyzer(File stopwordsFile) {
        try {
            stopSet = WordlistLoader.getWordSet(stopwordsFile);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    // Tokenize with Paoding, then apply filters. Lucene ships many other
    // TokenFilters that could be chained here as well.
    public final TokenStream tokenStream(String fieldName, Reader reader) {
        TokenStream result = new PaodingTokenizer(reader, getKnife(), createTokenCollector());
        // Lowercase first so stopword matching is case-insensitive for terms like "www"
        result = new LowerCaseFilter(result);
        result = new StopFilter(result, stopSet);
        return result;
    }
}
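As a quick check that the stop filter works, I compare the token output of a plain PaodingAnalyzer with that of PaodingChineseAnalyzer. This is only an illustrative sketch (the class name StopwordCheck and the sample sentence are made up); with the default list above, tokens such as 这, 是 and 的 should disappear from the filtered output:

import java.io.StringReader;

import net.paoding.analysis.analyzer.PaodingAnalyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;

public class StopwordCheck {
    public static void main(String[] args) throws Exception {
        printTokens(new PaodingAnalyzer(), "这是中文分词的测试");         // unfiltered
        printTokens(new PaodingChineseAnalyzer(), "这是中文分词的测试"); // stopwords removed
    }

    private static void printTokens(Analyzer analyzer, String text) throws Exception {
        TokenStream ts = analyzer.tokenStream("content", new StringReader(text));
        StringBuilder sb = new StringBuilder();
        Token token = new Token();
        while ((token = ts.next(token)) != null) {
            sb.append(token.term()).append(" | ");
        }
        System.out.println(sb);
    }
}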

 

Time was short, so the explanations and comments are not as thorough as they could be. If you have any questions, please leave a comment and we can discuss and learn from each other.

 

 
